Posted to commits@lucene.apache.org by ma...@apache.org on 2014/08/20 20:46:27 UTC
svn commit: r1619200 - in /lucene/dev/trunk/solr: CHANGES.txt core/src/java/org/apache/solr/update/HdfsTransactionLog.java
Author: markrmiller
Date: Wed Aug 20 18:46:27 2014
New Revision: 1619200
URL: http://svn.apache.org/r1619200
Log:
SOLR-6393: TransactionLog replay performance on HDFS is very poor.
Modified:
lucene/dev/trunk/solr/CHANGES.txt
lucene/dev/trunk/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java
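
In short: before this change, HDFSLogReader.next() flushed the transaction
log output stream and then closed and reopened the HDFS input stream on every
call, i.e. once per replayed record. The patch caches the tlog file length in
a new sz field and performs the flush-and-reopen only when the read position
has caught up to that cached length (see the standalone sketch after the
diffs below).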
Modified: lucene/dev/trunk/solr/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/CHANGES.txt?rev=1619200&r1=1619199&r2=1619200&view=diff
==============================================================================
--- lucene/dev/trunk/solr/CHANGES.txt (original)
+++ lucene/dev/trunk/solr/CHANGES.txt Wed Aug 20 18:46:27 2014
@@ -296,6 +296,8 @@ Bug Fixes
 
 * SOLR-6378: Fixed example/example-DIH/ issues with "tika" and "solr" configurations, and tidied up README.txt
   (Daniel Shchyokin via ehatcher)
+
+* SOLR-6393: TransactionLog replay performance on HDFS is very poor. (Mark Miller)
 
 Optimizations
 ---------------------
Modified: lucene/dev/trunk/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java?rev=1619200&r1=1619199&r2=1619200&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java (original)
+++ lucene/dev/trunk/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java Wed Aug 20 18:46:27 2014
@@ -341,12 +341,14 @@ public class HdfsTransactionLog extends
   public class HDFSLogReader extends LogReader{
     FSDataFastInputStream fis;
     private LogCodec codec = new LogCodec(resolver);
+    private long sz;
 
     public HDFSLogReader(long startingPos) {
       super();
       incref();
       try {
         FSDataInputStream fdis = fs.open(tlogFile);
+        sz = fs.getFileStatus(tlogFile).getLen();
         fis = new FSDataFastInputStream(fdis, startingPos);
       } catch (IOException e) {
         throw new RuntimeException(e);
@@ -361,7 +363,6 @@ public class HdfsTransactionLog extends
     public Object next() throws IOException, InterruptedException {
       long pos = fis.position();
 
-
       synchronized (HdfsTransactionLog.this) {
         if (trace) {
           log.trace("Reading log record. pos="+pos+" currentSize="+fos.size());
@@ -372,18 +373,22 @@ public class HdfsTransactionLog extends
         }
 
         fos.flushBuffer();
-        tlogOutStream.hflush();
-
-        // we actually need a new reader
+      }
+
+      // we actually need a new reader to
+      // see if any data was added by the writer
+      if (fis.position() >= sz) {
         fis.close();
+        tlogOutStream.hflush();
         try {
           FSDataInputStream fdis = fs.open(tlogFile);
           fis = new FSDataFastInputStream(fdis, pos);
+          sz = fs.getFileStatus(tlogFile).getLen();
         } catch (IOException e) {
           throw new RuntimeException(e);
         }
-
       }
+
       if (pos == 0) {
         readHeader(fis);
 
@@ -396,7 +401,6 @@ public class HdfsTransactionLog extends
         }
       }
 
-      tlogOutStream.hflush();
       Object o = codec.readVal(fis);
 
       // skip over record size
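
For readers who want the pattern without the surrounding Solr machinery, here
is a minimal sketch of the fix. It is an illustration, not the committed
code: TailingReaderSketch, LogStore, SeekableInput, and their methods are
hypothetical stand-ins for the FileSystem, FSDataInputStream, and
tlogOutStream calls in the diff above, with error handling trimmed to the
essentials.

import java.io.IOException;

/**
 * Sketch of the SOLR-6393 pattern: a reader tailing a growing log file
 * caches the file length ("sz") and only flushes the writer and reopens
 * its input stream once the read position catches up to that cached
 * length, instead of paying those costs on every single read.
 */
public class TailingReaderSketch {

  /** Hypothetical stand-in for the HDFS FileSystem/output stream pair. */
  interface LogStore {
    long length() throws IOException;                // like fs.getFileStatus(tlogFile).getLen()
    SeekableInput open(long pos) throws IOException; // like fs.open(tlogFile)
    void flushWriter() throws IOException;           // like tlogOutStream.hflush()
  }

  /** Hypothetical stand-in for FSDataFastInputStream. */
  interface SeekableInput {
    long position();
    int read(byte[] buf) throws IOException;
    void close() throws IOException;
  }

  private final LogStore store;
  private SeekableInput in;
  private long sz; // cached file length; refreshed only when we reopen

  public TailingReaderSketch(LogStore store, long startingPos) throws IOException {
    this.store = store;
    this.sz = store.length();
    this.in = store.open(startingPos);
  }

  /** Returns bytes read, or -1 if no new data is visible yet. */
  public int next(byte[] buf) throws IOException {
    long pos = in.position();
    if (pos >= sz) {
      // We may have caught up to the writer: flush it so new data is
      // visible, reopen at the old position, and refresh the cached length.
      in.close();
      store.flushWriter();
      in = store.open(pos);
      sz = store.length();
      if (pos >= sz) {
        return -1; // still nothing new after the flush
      }
    }
    return in.read(buf);
  }
}

The saving comes from amortization: the old code paid for an hflush() plus a
stream close and reopen on every call to next(), while the patched code pays
those costs only when the reader has actually consumed everything up to the
last known end of the file.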