Posted to commits@lucene.apache.org by mi...@apache.org on 2012/07/19 17:59:32 UTC
svn commit: r1363400 [10/31] - in /lucene/dev/branches/pforcodec_3892: ./
dev-tools/ dev-tools/eclipse/ dev-tools/idea/.idea/
dev-tools/idea/.idea/copyright/ dev-tools/idea/.idea/libraries/
dev-tools/idea/lucene/ dev-tools/maven/ dev-tools/maven/lucene...
Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java?rev=1363400&r1=1363399&r2=1363400&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java Thu Jul 19 15:58:54 2012
@@ -27,6 +27,7 @@ import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
+import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
@@ -53,7 +54,6 @@ import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.InfoStream;
import org.apache.lucene.util.MutableBits;
import org.apache.lucene.util.ThreadInterruptedException;
-import org.apache.lucene.util.TwoPhaseCommit;
/**
An <code>IndexWriter</code> creates and maintains an index.
@@ -276,8 +276,7 @@ public class IndexWriter implements Clos
* the writer nor calling {@link #commit}.
*
* <p>Note that this is functionally equivalent to calling
- * {#flush} and then using {@link IndexReader#open} to
- * open a new reader. But the turnaround time of this
+ * {#flush} and then opening a new reader. But the turnaround time of this
* method should be faster since it avoids the potentially
* costly {@link #commit}.</p>
*
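
For reference, the near-real-time pattern this javadoc describes looks roughly like the sketch below (assuming the 4.0-era DirectoryReader.open/openIfChanged(IndexWriter, boolean) API; the refresh method and its names are illustrative):

    import java.io.IOException;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexWriter;

    class NrtRefreshSketch {
      // Re-open against the writer instead of committing; much cheaper
      // turnaround than commit() followed by a fresh open.
      static DirectoryReader refresh(IndexWriter writer, DirectoryReader reader)
          throws IOException {
        DirectoryReader newReader = DirectoryReader.openIfChanged(reader, writer, true);
        if (newReader != null) {
          reader.close();
          return newReader; // sees the uncommitted changes
        }
        return reader;
      }
    }
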
@@ -499,7 +498,6 @@ public class IndexWriter implements Clos
* Obtain a ReadersAndLiveDocs instance from the
* readerPool. If create is true, you must later call
* {@link #release(ReadersAndLiveDocs)}.
- * @throws IOException
*/
public synchronized ReadersAndLiveDocs get(SegmentInfoPerCommit info, boolean create) {
@@ -531,7 +529,7 @@ public class IndexWriter implements Clos
* If the reader isn't being pooled, the segmentInfo's
* delCount is returned.
*/
- public int numDeletedDocs(SegmentInfoPerCommit info) throws IOException {
+ public int numDeletedDocs(SegmentInfoPerCommit info) {
ensureOpen(false);
int delCount = info.getDelCount();
@@ -573,19 +571,13 @@ public class IndexWriter implements Clos
* @param conf
* the configuration settings according to which IndexWriter should
* be initialized.
- * @throws CorruptIndexException
- * if the index is corrupt
- * @throws LockObtainFailedException
- * if another writer has this index open (<code>write.lock</code>
- * could not be obtained)
* @throws IOException
* if the directory cannot be read/written to, or if it does not
* exist and <code>conf.getOpenMode()</code> is
* <code>OpenMode.APPEND</code> or if there is any other low-level
* IO error
*/
- public IndexWriter(Directory d, IndexWriterConfig conf)
- throws CorruptIndexException, LockObtainFailedException, IOException {
+ public IndexWriter(Directory d, IndexWriterConfig conf) throws IOException {
config = new LiveIndexWriterConfig(conf.clone());
directory = d;
analyzer = config.getAnalyzer();
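
With this change the constructor declares only IOException; since CorruptIndexException and LockObtainFailedException both extend IOException, existing callers still compile. A minimal construction sketch, assuming the 4.0-era API (the path and analyzer choice are illustrative):

    import java.io.File;
    import java.io.IOException;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.IndexWriterConfig.OpenMode;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;
    import org.apache.lucene.util.Version;

    class OpenWriterSketch {
      static IndexWriter open() throws IOException { // single exception type now
        Directory dir = FSDirectory.open(new File("/tmp/example-index"));
        IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_40,
            new StandardAnalyzer(Version.LUCENE_40));
        conf.setOpenMode(OpenMode.CREATE_OR_APPEND);
        return new IndexWriter(dir, conf);
      }
    }
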
@@ -761,7 +753,7 @@ public class IndexWriter implements Clos
return config;
}
- private void messageState() throws IOException {
+ private void messageState() {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "\ndir=" + directory + "\n" +
"index=" + segString() + "\n" +
@@ -809,10 +801,9 @@ public class IndexWriter implements Clos
* you should immediately close the writer, again. See <a
* href="#OOME">above</a> for details.</p>
*
- * @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
- public void close() throws CorruptIndexException, IOException {
+ public void close() throws IOException {
close(true);
}
@@ -839,17 +830,21 @@ public class IndexWriter implements Clos
* finished (which should be at most a few seconds), and
* then return.
*/
- public void close(boolean waitForMerges) throws CorruptIndexException, IOException {
+ public void close(boolean waitForMerges) throws IOException {
- // Ensure that only one thread actually gets to do the closing:
- if (shouldClose()) {
- // If any methods have hit OutOfMemoryError, then abort
- // on close, in case the internal state of IndexWriter
- // or DocumentsWriter is corrupt
- if (hitOOM)
- rollbackInternal();
- else
- closeInternal(waitForMerges);
+ // Ensure that only one thread actually gets to do the
+ // closing, and make sure no commit is also in progress:
+ synchronized(commitLock) {
+ if (shouldClose()) {
+ // If any methods have hit OutOfMemoryError, then abort
+ // on close, in case the internal state of IndexWriter
+ // or DocumentsWriter is corrupt
+ if (hitOOM) {
+ rollbackInternal();
+ } else {
+ closeInternal(waitForMerges, !hitOOM);
+ }
+ }
}
}
@@ -868,12 +863,13 @@ public class IndexWriter implements Clos
// successfully) or another (fails to close)
doWait();
}
- } else
+ } else {
return false;
+ }
}
}
- private void closeInternal(boolean waitForMerges) throws CorruptIndexException, IOException {
+ private void closeInternal(boolean waitForMerges, boolean doFlush) throws IOException {
try {
@@ -889,8 +885,10 @@ public class IndexWriter implements Clos
// Only allow a new merge to be triggered if we are
// going to wait for merges:
- if (!hitOOM) {
+ if (doFlush) {
flush(waitForMerges, true);
+ } else {
+ docWriter.abort(); // already closed
}
if (waitForMerges)
@@ -910,7 +908,7 @@ public class IndexWriter implements Clos
infoStream.message("IW", "now call final commit()");
}
- if (!hitOOM) {
+ if (doFlush) {
commitInternal(null);
}
@@ -981,7 +979,7 @@ public class IndexWriter implements Clos
* are not counted. If you really need these to be
* counted you should call {@link #commit()} first.
* @see #numDocs */
- public synchronized int numDocs() throws IOException {
+ public synchronized int numDocs() {
ensureOpen();
int count;
if (docWriter != null)
@@ -995,7 +993,7 @@ public class IndexWriter implements Clos
return count;
}
- public synchronized boolean hasDeletions() throws IOException {
+ public synchronized boolean hasDeletions() {
ensureOpen();
if (bufferedDeletesStream.any()) {
return true;
@@ -1053,7 +1051,7 @@ public class IndexWriter implements Clos
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
- public void addDocument(Iterable<? extends IndexableField> doc) throws CorruptIndexException, IOException {
+ public void addDocument(Iterable<? extends IndexableField> doc) throws IOException {
addDocument(doc, analyzer);
}
@@ -1072,7 +1070,7 @@ public class IndexWriter implements Clos
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
- public void addDocument(Iterable<? extends IndexableField> doc, Analyzer analyzer) throws CorruptIndexException, IOException {
+ public void addDocument(Iterable<? extends IndexableField> doc, Analyzer analyzer) throws IOException {
updateDocument(null, doc, analyzer);
}
@@ -1117,7 +1115,7 @@ public class IndexWriter implements Clos
*
* @lucene.experimental
*/
- public void addDocuments(Iterable<? extends Iterable<? extends IndexableField>> docs) throws CorruptIndexException, IOException {
+ public void addDocuments(Iterable<? extends Iterable<? extends IndexableField>> docs) throws IOException {
addDocuments(docs, analyzer);
}
@@ -1132,7 +1130,7 @@ public class IndexWriter implements Clos
*
* @lucene.experimental
*/
- public void addDocuments(Iterable<? extends Iterable<? extends IndexableField>> docs, Analyzer analyzer) throws CorruptIndexException, IOException {
+ public void addDocuments(Iterable<? extends Iterable<? extends IndexableField>> docs, Analyzer analyzer) throws IOException {
updateDocuments(null, docs, analyzer);
}
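
For reference, a minimal usage sketch of the block API (field population omitted for brevity; Document implements Iterable<IndexableField>, so a List<Document> satisfies the signature):

    import java.io.IOException;
    import java.util.Arrays;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.IndexWriter;

    class AddBlockSketch {
      static void addBlock(IndexWriter writer) throws IOException {
        Document child = new Document();  // fields omitted for brevity
        Document parent = new Document(); // fields omitted for brevity
        // The block is added atomically and stays contiguous in the segment:
        writer.addDocuments(Arrays.asList(child, parent));
      }
    }
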
@@ -1149,7 +1147,7 @@ public class IndexWriter implements Clos
*
* @lucene.experimental
*/
- public void updateDocuments(Term delTerm, Iterable<? extends Iterable<? extends IndexableField>> docs) throws CorruptIndexException, IOException {
+ public void updateDocuments(Term delTerm, Iterable<? extends Iterable<? extends IndexableField>> docs) throws IOException {
updateDocuments(delTerm, docs, analyzer);
}
@@ -1167,7 +1165,7 @@ public class IndexWriter implements Clos
*
* @lucene.experimental
*/
- public void updateDocuments(Term delTerm, Iterable<? extends Iterable<? extends IndexableField>> docs, Analyzer analyzer) throws CorruptIndexException, IOException {
+ public void updateDocuments(Term delTerm, Iterable<? extends Iterable<? extends IndexableField>> docs, Analyzer analyzer) throws IOException {
ensureOpen();
try {
boolean success = false;
@@ -1201,7 +1199,7 @@ public class IndexWriter implements Clos
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
- public void deleteDocuments(Term term) throws CorruptIndexException, IOException {
+ public void deleteDocuments(Term term) throws IOException {
ensureOpen();
try {
docWriter.deleteTerms(term);
@@ -1224,7 +1222,7 @@ public class IndexWriter implements Clos
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
- public void deleteDocuments(Term... terms) throws CorruptIndexException, IOException {
+ public void deleteDocuments(Term... terms) throws IOException {
ensureOpen();
try {
docWriter.deleteTerms(terms);
@@ -1244,7 +1242,7 @@ public class IndexWriter implements Clos
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
- public void deleteDocuments(Query query) throws CorruptIndexException, IOException {
+ public void deleteDocuments(Query query) throws IOException {
ensureOpen();
try {
docWriter.deleteQueries(query);
@@ -1266,7 +1264,7 @@ public class IndexWriter implements Clos
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
- public void deleteDocuments(Query... queries) throws CorruptIndexException, IOException {
+ public void deleteDocuments(Query... queries) throws IOException {
ensureOpen();
try {
docWriter.deleteQueries(queries);
@@ -1292,7 +1290,7 @@ public class IndexWriter implements Clos
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
- public void updateDocument(Term term, Iterable<? extends IndexableField> doc) throws CorruptIndexException, IOException {
+ public void updateDocument(Term term, Iterable<? extends IndexableField> doc) throws IOException {
ensureOpen();
updateDocument(term, doc, getAnalyzer());
}
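
A minimal usage sketch (the "id" field name is illustrative):

    import java.io.IOException;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.Term;

    class UpdateDocSketch {
      static void replace(IndexWriter writer, String id, Document newDoc)
          throws IOException {
        // Atomic delete-then-add: readers never see the index between the
        // delete and the add.
        writer.updateDocument(new Term("id", id), newDoc);
      }
    }
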
@@ -1316,7 +1314,7 @@ public class IndexWriter implements Clos
* @throws IOException if there is a low-level IO error
*/
public void updateDocument(Term term, Iterable<? extends IndexableField> doc, Analyzer analyzer)
- throws CorruptIndexException, IOException {
+ throws IOException {
ensureOpen();
try {
boolean success = false;
@@ -1450,14 +1448,15 @@ public class IndexWriter implements Clos
* then any thread still running this method might hit a
* {@link MergePolicy.MergeAbortedException}.
*
+ * @param maxNumSegments maximum number of segments left
+ * in the index after merging finishes
+ *
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
* @see MergePolicy#findMerges
*
- * @param maxNumSegments maximum number of segments left
- * in the index after merging finishes
*/
- public void forceMerge(int maxNumSegments) throws CorruptIndexException, IOException {
+ public void forceMerge(int maxNumSegments) throws IOException {
forceMerge(maxNumSegments, true);
}
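
A typical call, for reference; this variant waits for the merges to complete:

    import java.io.IOException;
    import org.apache.lucene.index.IndexWriter;

    class ForceMergeSketch {
      static void compact(IndexWriter writer) throws IOException {
        writer.forceMerge(1); // merge down to a single segment, blocking
      }
    }
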
@@ -1471,7 +1470,7 @@ public class IndexWriter implements Clos
* you should immediately close the writer. See <a
* href="#OOME">above</a> for details.</p>
*/
- public void forceMerge(int maxNumSegments, boolean doWait) throws CorruptIndexException, IOException {
+ public void forceMerge(int maxNumSegments, boolean doWait) throws IOException {
ensureOpen();
if (maxNumSegments < 1)
@@ -1582,7 +1581,7 @@ public class IndexWriter implements Clos
* {@link MergePolicy.MergeAbortedException}.
*/
public void forceMergeDeletes(boolean doWait)
- throws CorruptIndexException, IOException {
+ throws IOException {
ensureOpen();
flush(true, true);
@@ -1667,7 +1666,7 @@ public class IndexWriter implements Clos
* you should immediately close the writer. See <a
* href="#OOME">above</a> for details.</p>
*/
- public void forceMergeDeletes() throws CorruptIndexException, IOException {
+ public void forceMergeDeletes() throws IOException {
forceMergeDeletes(true);
}
@@ -1685,18 +1684,18 @@ public class IndexWriter implements Clos
* you should immediately close the writer. See <a
* href="#OOME">above</a> for details.</p>
*/
- public final void maybeMerge() throws CorruptIndexException, IOException {
+ public final void maybeMerge() throws IOException {
maybeMerge(-1);
}
- private final void maybeMerge(int maxNumSegments) throws CorruptIndexException, IOException {
+ private final void maybeMerge(int maxNumSegments) throws IOException {
ensureOpen(false);
updatePendingMerges(maxNumSegments);
mergeScheduler.merge(this);
}
private synchronized void updatePendingMerges(int maxNumSegments)
- throws CorruptIndexException, IOException {
+ throws IOException {
assert maxNumSegments == -1 || maxNumSegments > 0;
if (stopMerges) {
@@ -1774,9 +1773,13 @@ public class IndexWriter implements Clos
public void rollback() throws IOException {
ensureOpen();
- // Ensure that only one thread actually gets to do the closing:
- if (shouldClose())
- rollbackInternal();
+ // Ensure that only one thread actually gets to do the
+ // closing, and make sure no commit is also in progress:
+ synchronized(commitLock) {
+ if (shouldClose()) {
+ rollbackInternal();
+ }
+ }
}
private void rollbackInternal() throws IOException {
@@ -1786,6 +1789,7 @@ public class IndexWriter implements Clos
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "rollback");
}
+
try {
synchronized(this) {
@@ -1804,7 +1808,8 @@ public class IndexWriter implements Clos
mergeScheduler.close();
bufferedDeletesStream.clear();
-
+ docWriter.close(); // mark it as closed first to prevent subsequent indexing actions/flushes
+ docWriter.abort();
synchronized(this) {
if (pendingCommit != null) {
@@ -1826,8 +1831,7 @@ public class IndexWriter implements Clos
if (infoStream.isEnabled("IW") ) {
infoStream.message("IW", "rollback: infos=" + segString(segmentInfos));
}
-
- docWriter.abort();
+
assert testPoint("rollback before checkpoint");
@@ -1854,7 +1858,7 @@ public class IndexWriter implements Clos
}
}
- closeInternal(false);
+ closeInternal(false, false);
}
/**
@@ -1909,7 +1913,7 @@ public class IndexWriter implements Clos
}
}
- private synchronized void finishMerges(boolean waitForMerges) throws IOException {
+ private synchronized void finishMerges(boolean waitForMerges) {
if (!waitForMerges) {
stopMerges = true;
@@ -1999,11 +2003,11 @@ public class IndexWriter implements Clos
/**
* Prepares the {@link SegmentInfo} for the new flushed segment and persists
* the deleted documents {@link MutableBits}. Use
- * {@link #publishFlushedSegment(SegmentInfo, FrozenBufferedDeletes, FrozenBufferedDeletes)} to
+ * {@link #publishFlushedSegment(SegmentInfoPerCommit, FrozenBufferedDeletes, FrozenBufferedDeletes)} to
* publish the returned {@link SegmentInfo} together with its segment private
* delete packet.
*
- * @see #publishFlushedSegment(SegmentInfo, FrozenBufferedDeletes, FrozenBufferedDeletes)
+ * @see #publishFlushedSegment(SegmentInfoPerCommit, FrozenBufferedDeletes, FrozenBufferedDeletes)
*/
SegmentInfoPerCommit prepareFlushedSegment(FlushedSegment flushedSegment) throws IOException {
assert flushedSegment != null;
@@ -2075,7 +2079,7 @@ public class IndexWriter implements Clos
return newSegment;
}
- synchronized void publishFrozenDeletes(FrozenBufferedDeletes packet) throws IOException {
+ synchronized void publishFrozenDeletes(FrozenBufferedDeletes packet) {
assert packet != null && packet.any();
synchronized (bufferedDeletesStream) {
bufferedDeletesStream.push(packet);
@@ -2184,7 +2188,7 @@ public class IndexWriter implements Clos
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
- public void addIndexes(Directory... dirs) throws CorruptIndexException, IOException {
+ public void addIndexes(Directory... dirs) throws IOException {
ensureOpen();
noDupDirs(dirs);
@@ -2232,31 +2236,43 @@ public class IndexWriter implements Clos
handleOOM(oom, "addIndexes(Directory...)");
}
}
-
- /** Merges the provided indexes into this index.
- * <p>The provided IndexReaders are not closed.</p>
- *
- * <p><b>NOTE:</b> while this is running, any attempts to
- * add or delete documents (with another thread) will be
- * paused until this method completes.
- *
- * <p>See {@link #addIndexes} for details on transactional
- * semantics, temporary free space required in the Directory,
- * and non-CFS segments on an Exception.</p>
- *
- * <p><b>NOTE</b>: if this method hits an OutOfMemoryError
- * you should immediately close the writer. See <a
- * href="#OOME">above</a> for details.</p>
- *
- * <p><b>NOTE</b>: if you call {@link #close(boolean)}
- * with <tt>false</tt>, which aborts all running merges,
- * then any thread still running this method might hit a
- * {@link MergePolicy.MergeAbortedException}.
- *
- * @throws CorruptIndexException if the index is corrupt
- * @throws IOException if there is a low-level IO error
+
+ /**
+ * Merges the provided indexes into this index.
+ *
+ * <p>
+ * The provided IndexReaders are not closed.
+ *
+ * <p>
+ * See {@link #addIndexes} for details on transactional semantics, temporary
+ * free space required in the Directory, and non-CFS segments on an Exception.
+ *
+ * <p>
+ * <b>NOTE</b>: if this method hits an OutOfMemoryError you should immediately
+ * close the writer. See <a href="#OOME">above</a> for details.
+ *
+ * <p>
+ * <b>NOTE:</b> this method merges all given {@link IndexReader}s in one
+ * merge. If you intend to merge a large number of readers, it may be better
+ * to call this method multiple times, each time with a small set of readers.
+ * In principle, if you use a merge policy with a {@code mergeFactor} or
+ * {@code maxMergeAtOnce} parameter, you should pass that many readers in one
+ * call. Also, if the given readers are {@link DirectoryReader}s, they can be
+ * opened with {@code termIndexInterval=-1} to save RAM, since during merge
+ * the in-memory structure is not used. See
+ * {@link DirectoryReader#open(Directory, int)}.
+ *
+ * <p>
+ * <b>NOTE</b>: if you call {@link #close(boolean)} with <tt>false</tt>, which
+ * aborts all running merges, then any thread still running this method might
+ * hit a {@link MergePolicy.MergeAbortedException}.
+ *
+ * @throws CorruptIndexException
+ * if the index is corrupt
+ * @throws IOException
+ * if there is a low-level IO error
*/
- public void addIndexes(IndexReader... readers) throws CorruptIndexException, IOException {
+ public void addIndexes(IndexReader... readers) throws IOException {
ensureOpen();
int numDocs = 0;
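
The batching advice in the new javadoc might be applied with a helper like this sketch (batchSize standing in for the merge policy's mergeFactor or maxMergeAtOnce; names are illustrative):

    import java.io.IOException;
    import java.util.List;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.IndexWriter;

    class AddIndexesSketch {
      static void addAll(IndexWriter writer, List<IndexReader> readers, int batchSize)
          throws IOException {
        // One merge per call, so keep each call to roughly mergeFactor readers:
        for (int i = 0; i < readers.size(); i += batchSize) {
          List<IndexReader> batch =
              readers.subList(i, Math.min(i + batchSize, readers.size()));
          writer.addIndexes(batch.toArray(new IndexReader[batch.size()]));
        }
      }
    }
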
@@ -2444,7 +2460,7 @@ public class IndexWriter implements Clos
* href="#OOME">above</a> for details.</p>
*
* @see #prepareCommit(Map) */
- public final void prepareCommit() throws CorruptIndexException, IOException {
+ public final void prepareCommit() throws IOException {
ensureOpen();
prepareCommit(null);
}
@@ -2479,102 +2495,105 @@ public class IndexWriter implements Clos
* only "stick" if there are actually changes in the
* index to commit.
*/
- public final void prepareCommit(Map<String,String> commitUserData) throws CorruptIndexException, IOException {
+ public final void prepareCommit(Map<String,String> commitUserData) throws IOException {
ensureOpen(false);
- if (infoStream.isEnabled("IW")) {
- infoStream.message("IW", "prepareCommit: flush");
- infoStream.message("IW", " index before flush " + segString());
- }
+ synchronized(commitLock) {
+ if (infoStream.isEnabled("IW")) {
+ infoStream.message("IW", "prepareCommit: flush");
+ infoStream.message("IW", " index before flush " + segString());
+ }
- if (hitOOM) {
- throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot commit");
- }
+ if (hitOOM) {
+ throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot commit");
+ }
- if (pendingCommit != null) {
- throw new IllegalStateException("prepareCommit was already called with no corresponding call to commit");
- }
+ if (pendingCommit != null) {
+ throw new IllegalStateException("prepareCommit was already called with no corresponding call to commit");
+ }
- doBeforeFlush();
- assert testPoint("startDoFlush");
- SegmentInfos toCommit = null;
- boolean anySegmentsFlushed = false;
+ doBeforeFlush();
+ assert testPoint("startDoFlush");
+ SegmentInfos toCommit = null;
+ boolean anySegmentsFlushed = false;
- // This is copied from doFlush, except it's modified to
- // clone & incRef the flushed SegmentInfos inside the
- // sync block:
+ // This is copied from doFlush, except it's modified to
+ // clone & incRef the flushed SegmentInfos inside the
+ // sync block:
- try {
+ try {
- synchronized (fullFlushLock) {
- boolean flushSuccess = false;
- boolean success = false;
- try {
- anySegmentsFlushed = docWriter.flushAllThreads();
- if (!anySegmentsFlushed) {
- // prevent double increment since docWriter#doFlush increments the flushcount
- // if we flushed anything.
- flushCount.incrementAndGet();
- }
- flushSuccess = true;
+ synchronized (fullFlushLock) {
+ boolean flushSuccess = false;
+ boolean success = false;
+ try {
+ anySegmentsFlushed = docWriter.flushAllThreads();
+ if (!anySegmentsFlushed) {
+ // prevent double increment since docWriter#doFlush increments the flushcount
+ // if we flushed anything.
+ flushCount.incrementAndGet();
+ }
+ flushSuccess = true;
- synchronized(this) {
- maybeApplyDeletes(true);
+ synchronized(this) {
+ maybeApplyDeletes(true);
- readerPool.commit(segmentInfos);
+ readerPool.commit(segmentInfos);
- // Must clone the segmentInfos while we still
- // hold fullFlushLock and while sync'd so that
- // no partial changes (eg a delete w/o
- // corresponding add from an updateDocument) can
- // sneak into the commit point:
- toCommit = segmentInfos.clone();
-
- pendingCommitChangeCount = changeCount;
-
- // This protects the segmentInfos we are now going
- // to commit. This is important in case, eg, while
- // we are trying to sync all referenced files, a
- // merge completes which would otherwise have
- // removed the files we are now syncing.
- filesToCommit = toCommit.files(directory, false);
- deleter.incRef(filesToCommit);
- }
- success = true;
- } finally {
- if (!success) {
- if (infoStream.isEnabled("IW")) {
- infoStream.message("IW", "hit exception during prepareCommit");
+ // Must clone the segmentInfos while we still
+ // hold fullFlushLock and while sync'd so that
+ // no partial changes (eg a delete w/o
+ // corresponding add from an updateDocument) can
+ // sneak into the commit point:
+ toCommit = segmentInfos.clone();
+
+ pendingCommitChangeCount = changeCount;
+
+ // This protects the segmentInfos we are now going
+ // to commit. This is important in case, eg, while
+ // we are trying to sync all referenced files, a
+ // merge completes which would otherwise have
+ // removed the files we are now syncing.
+ filesToCommit = toCommit.files(directory, false);
+ deleter.incRef(filesToCommit);
}
+ success = true;
+ } finally {
+ if (!success) {
+ if (infoStream.isEnabled("IW")) {
+ infoStream.message("IW", "hit exception during prepareCommit");
+ }
+ }
+ // Done: finish the full flush!
+ docWriter.finishFullFlush(flushSuccess);
+ doAfterFlush();
}
- // Done: finish the full flush!
- docWriter.finishFullFlush(flushSuccess);
- doAfterFlush();
}
+ } catch (OutOfMemoryError oom) {
+ handleOOM(oom, "prepareCommit");
}
- } catch (OutOfMemoryError oom) {
- handleOOM(oom, "prepareCommit");
- }
- boolean success = false;
- try {
- if (anySegmentsFlushed) {
- maybeMerge();
- }
- success = true;
- } finally {
- if (!success) {
- synchronized (this) {
- deleter.decRef(filesToCommit);
- filesToCommit = null;
+ boolean success = false;
+ try {
+ if (anySegmentsFlushed) {
+ maybeMerge();
+ }
+ success = true;
+ } finally {
+ if (!success) {
+ synchronized (this) {
+ deleter.decRef(filesToCommit);
+ filesToCommit = null;
+ }
}
}
- }
- startCommit(toCommit, commitUserData);
+ startCommit(toCommit, commitUserData);
+ }
}
- // Used only by commit, below; lock order is commitLock -> IW
+ // Used only by commit and prepareCommit, below; lock
+ // order is commitLock -> IW
private final Object commitLock = new Object();
/**
@@ -2607,7 +2626,7 @@ public class IndexWriter implements Clos
* @see #prepareCommit
* @see #commit(Map)
*/
- public final void commit() throws CorruptIndexException, IOException {
+ public final void commit() throws IOException {
commit(null);
}
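
For reference, a sketch of the two-phase pattern commit() builds on: prepareCommit does the expensive work, then commit (or rollback) finishes it, which is useful when the index must stay in sync with another resource:

    import java.io.IOException;
    import org.apache.lucene.index.IndexWriter;

    class TwoPhaseCommitSketch {
      static void commitSafely(IndexWriter writer) throws IOException {
        writer.prepareCommit(); // flush and sync, but not yet visible
        try {
          // ... prepare any other resource that must stay in sync ...
          writer.commit();      // cheap: finishes the prepared commit
        } catch (IOException e) {
          writer.rollback();    // discards the pending commit and closes the writer
          throw e;
        }
      }
    }
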
@@ -2620,20 +2639,22 @@ public class IndexWriter implements Clos
* you should immediately close the writer. See <a
* href="#OOME">above</a> for details.</p>
*/
- public final void commit(Map<String,String> commitUserData) throws CorruptIndexException, IOException {
+ public final void commit(Map<String,String> commitUserData) throws IOException {
ensureOpen();
commitInternal(commitUserData);
}
- private final void commitInternal(Map<String,String> commitUserData) throws CorruptIndexException, IOException {
+ private final void commitInternal(Map<String,String> commitUserData) throws IOException {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "commit: start");
}
synchronized(commitLock) {
+ ensureOpen(false);
+
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "commit: enter lock");
}
@@ -2653,7 +2674,7 @@ public class IndexWriter implements Clos
}
}
- private synchronized final void finishCommit() throws CorruptIndexException, IOException {
+ private synchronized final void finishCommit() throws IOException {
if (pendingCommit != null) {
try {
@@ -2699,7 +2720,7 @@ public class IndexWriter implements Clos
* deletes or docs were flushed) if necessary
* @param applyAllDeletes whether pending deletes should also be applied
*/
- protected final void flush(boolean triggerMerge, boolean applyAllDeletes) throws CorruptIndexException, IOException {
+ protected final void flush(boolean triggerMerge, boolean applyAllDeletes) throws IOException {
// NOTE: this method cannot be sync'd because
// maybeMerge() in turn calls mergeScheduler.merge which
@@ -2715,7 +2736,7 @@ public class IndexWriter implements Clos
}
}
- private boolean doFlush(boolean applyAllDeletes) throws CorruptIndexException, IOException {
+ private boolean doFlush(boolean applyAllDeletes) throws IOException {
if (hitOOM) {
throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot flush");
}
@@ -2822,7 +2843,7 @@ public class IndexWriter implements Clos
return docWriter.getNumDocs();
}
- private synchronized void ensureValidMerge(MergePolicy.OneMerge merge) throws IOException {
+ private synchronized void ensureValidMerge(MergePolicy.OneMerge merge) {
for(SegmentInfoPerCommit info : merge.segments) {
if (!segmentInfos.contains(info)) {
throw new MergePolicy.MergeException("MergePolicy selected a segment (" + info.info.name + ") that is not in the current index " + segString(), directory);
@@ -2861,7 +2882,7 @@ public class IndexWriter implements Clos
SegmentInfoPerCommit info = sourceSegments.get(i);
minGen = Math.min(info.getBufferedDeletesGen(), minGen);
final int docCount = info.info.getDocCount();
- final Bits prevLiveDocs = merge.readerLiveDocs.get(i);
+ final Bits prevLiveDocs = merge.readers.get(i).getLiveDocs();
final Bits currentLiveDocs;
final ReadersAndLiveDocs rld = readerPool.get(info, false);
// We hold a ref so it should still be in the pool:
@@ -3082,8 +3103,7 @@ public class IndexWriter implements Clos
*
* @lucene.experimental
*/
- public void merge(MergePolicy.OneMerge merge)
- throws CorruptIndexException, IOException {
+ public void merge(MergePolicy.OneMerge merge) throws IOException {
boolean success = false;
@@ -3145,7 +3165,7 @@ public class IndexWriter implements Clos
* are now participating in a merge, and true is
* returned. Else (the merge conflicts) false is
* returned. */
- final synchronized boolean registerMerge(MergePolicy.OneMerge merge) throws MergePolicy.MergeAbortedException, IOException {
+ final synchronized boolean registerMerge(MergePolicy.OneMerge merge) throws IOException {
if (merge.registerDone) {
return true;
@@ -3328,7 +3348,7 @@ public class IndexWriter implements Clos
/** Does finishing for a merge, which is fast but holds
* the synchronized lock on IndexWriter instance. */
- final synchronized void mergeFinish(MergePolicy.OneMerge merge) throws IOException {
+ final synchronized void mergeFinish(MergePolicy.OneMerge merge) {
// forceMerge, addIndexes or finishMerges may be waiting
// on merges to finish.
@@ -3389,8 +3409,7 @@ public class IndexWriter implements Clos
/** Does the actual (time-consuming) work of the merge,
* but without holding synchronized lock on IndexWriter
* instance */
- private int mergeMiddle(MergePolicy.OneMerge merge)
- throws CorruptIndexException, IOException {
+ private int mergeMiddle(MergePolicy.OneMerge merge) throws IOException {
merge.checkAborted(directory);
@@ -3411,7 +3430,6 @@ public class IndexWriter implements Clos
}
merge.readers = new ArrayList<SegmentReader>();
- merge.readerLiveDocs = new ArrayList<Bits>();
// This is try/finally to make sure merger's readers are
// closed:
@@ -3425,7 +3443,7 @@ public class IndexWriter implements Clos
// Hold onto the "live" reader; we will use this to
// commit merged deletes
final ReadersAndLiveDocs rld = readerPool.get(info, true);
- final SegmentReader reader = rld.getMergeReader(context);
+ SegmentReader reader = rld.getMergeReader(context);
assert reader != null;
// Carefully pull the most recent live docs:
@@ -3451,11 +3469,33 @@ public class IndexWriter implements Clos
}
}
}
- merge.readerLiveDocs.add(liveDocs);
+
+ // Deletes might have happened after we pulled the merge reader and
+ // before we got a read-only copy of the segment's actual live docs
+ // (taking pending deletes into account). In that case we need to
+ // make a new reader with updated live docs and del count.
+ if (reader.numDeletedDocs() != delCount) {
+ // fix the reader's live docs and del count
+ assert delCount > reader.numDeletedDocs(); // beware of zombies
+
+ SegmentReader newReader = new SegmentReader(info, reader.core, liveDocs, info.info.getDocCount() - delCount);
+ boolean released = false;
+ try {
+ rld.release(reader);
+ released = true;
+ } finally {
+ if (!released) {
+ newReader.decRef();
+ }
+ }
+
+ reader = newReader;
+ }
+
merge.readers.add(reader);
assert delCount <= info.info.getDocCount(): "delCount=" + delCount + " info.docCount=" + info.info.getDocCount() + " rld.pendingDeleteCount=" + rld.getPendingDeleteCount() + " info.getDelCount()=" + info.getDelCount();
if (delCount < info.info.getDocCount()) {
- merger.add(reader, liveDocs, delCount);
+ merger.add(reader);
}
segUpto++;
}
@@ -3571,7 +3611,7 @@ public class IndexWriter implements Clos
// lost...
if (infoStream.isEnabled("IW")) {
- infoStream.message("IW", String.format("merged segment size=%.3f MB vs estimate=%.3f MB", merge.info.info.sizeInBytes()/1024./1024., merge.estimatedMergeBytes/1024/1024.));
+ infoStream.message("IW", String.format(Locale.ROOT, "merged segment size=%.3f MB vs estimate=%.3f MB", merge.info.info.sizeInBytes()/1024./1024., merge.estimatedMergeBytes/1024/1024.));
}
final IndexReaderWarmer mergedSegmentWarmer = config.getMergedSegmentWarmer();
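
The Locale.ROOT additions in this commit pin %.3f to a dot decimal separator; without an explicit locale the message depends on the JVM's default locale:

    import java.util.Locale;

    class LocaleSketch {
      public static void main(String[] args) {
        double mb = 1234.5678;
        System.out.println(String.format(Locale.GERMANY, "%.3f", mb)); // "1234,568"
        System.out.println(String.format(Locale.ROOT, "%.3f", mb));    // "1234.568"
      }
    }
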
@@ -3632,12 +3672,12 @@ public class IndexWriter implements Clos
}
/** @lucene.internal */
- public synchronized String segString() throws IOException {
+ public synchronized String segString() {
return segString(segmentInfos);
}
/** @lucene.internal */
- public synchronized String segString(Iterable<SegmentInfoPerCommit> infos) throws IOException {
+ public synchronized String segString(Iterable<SegmentInfoPerCommit> infos) {
final StringBuilder buffer = new StringBuilder();
for(final SegmentInfoPerCommit info : infos) {
if (buffer.length() > 0) {
@@ -3649,7 +3689,7 @@ public class IndexWriter implements Clos
}
/** @lucene.internal */
- public synchronized String segString(SegmentInfoPerCommit info) throws IOException {
+ public synchronized String segString(SegmentInfoPerCommit info) {
return info.toString(info.info.dir, numDeletedDocs(info) - info.getDelCount());
}
Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/LogMergePolicy.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/LogMergePolicy.java?rev=1363400&r1=1363399&r2=1363400&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/LogMergePolicy.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/LogMergePolicy.java Thu Jul 19 15:58:54 2012
@@ -21,6 +21,7 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
+import java.util.Locale;
import java.util.Map;
@@ -420,7 +421,7 @@ public abstract class LogMergePolicy ext
*/
@Override
public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos)
- throws CorruptIndexException, IOException {
+ throws IOException {
final List<SegmentInfoPerCommit> segments = segmentInfos.asList();
final int numSegments = segments.size();
@@ -535,7 +536,7 @@ public abstract class LogMergePolicy ext
if (size >= maxMergeSize) {
extra += " [skip: too large]";
}
- message("seg=" + writer.get().segString(info) + " level=" + infoLevel.level + " size=" + String.format("%.3f MB", segBytes/1024/1024.) + extra);
+ message("seg=" + writer.get().segString(info) + " level=" + infoLevel.level + " size=" + String.format(Locale.ROOT, "%.3f MB", segBytes/1024/1024.) + extra);
}
}
Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MergePolicy.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MergePolicy.java?rev=1363400&r1=1363399&r2=1363400&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MergePolicy.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MergePolicy.java Thu Jul 19 15:58:54 2012
@@ -24,7 +24,6 @@ import java.util.Map;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MergeInfo;
-import org.apache.lucene.util.Bits;
import org.apache.lucene.util.SetOnce.AlreadySetException;
import org.apache.lucene.util.SetOnce;
@@ -74,7 +73,6 @@ public abstract class MergePolicy implem
int maxNumSegments = -1; // used by IndexWriter
public long estimatedMergeBytes; // used by IndexWriter
List<SegmentReader> readers; // used by IndexWriter
- List<Bits> readerLiveDocs; // used by IndexWriter
public final List<SegmentInfoPerCommit> segments;
public final int totalDocCount;
boolean aborted;
@@ -299,7 +297,7 @@ public abstract class MergePolicy implem
* the total set of segments in the index
*/
public abstract MergeSpecification findMerges(SegmentInfos segmentInfos)
- throws CorruptIndexException, IOException;
+ throws IOException;
/**
* Determine what set of merge operations is necessary in
@@ -324,7 +322,7 @@ public abstract class MergePolicy implem
*/
public abstract MergeSpecification findForcedMerges(
SegmentInfos segmentInfos, int maxSegmentCount, Map<SegmentInfoPerCommit,Boolean> segmentsToMerge)
- throws CorruptIndexException, IOException;
+ throws IOException;
/**
* Determine what set of merge operations is necessary in order to expunge all
@@ -334,7 +332,7 @@ public abstract class MergePolicy implem
* the total set of segments in the index
*/
public abstract MergeSpecification findForcedDeletesMerges(
- SegmentInfos segmentInfos) throws CorruptIndexException, IOException;
+ SegmentInfos segmentInfos) throws IOException;
/**
* Release all resources for the policy.
Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MergeScheduler.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MergeScheduler.java?rev=1363400&r1=1363399&r2=1363400&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MergeScheduler.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MergeScheduler.java Thu Jul 19 15:58:54 2012
@@ -29,10 +29,8 @@ import java.io.IOException;
public abstract class MergeScheduler {
/** Run the merges provided by {@link IndexWriter#getNextMerge()}. */
- public abstract void merge(IndexWriter writer)
- throws CorruptIndexException, IOException;
+ public abstract void merge(IndexWriter writer) throws IOException;
/** Close this MergeScheduler. */
- public abstract void close()
- throws CorruptIndexException, IOException;
+ public abstract void close() throws IOException;
}
Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MergeState.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MergeState.java?rev=1363400&r1=1363399&r2=1363400&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MergeState.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MergeState.java Thu Jul 19 15:58:54 2012
@@ -31,18 +31,6 @@ import org.apache.lucene.util.packed.Pac
* @lucene.experimental */
public class MergeState {
- public static class IndexReaderAndLiveDocs {
- public final AtomicReader reader;
- public final Bits liveDocs;
- public final int numDeletedDocs;
-
- public IndexReaderAndLiveDocs(AtomicReader reader, Bits liveDocs, int numDeletedDocs) {
- this.reader = reader;
- this.liveDocs = liveDocs;
- this.numDeletedDocs = numDeletedDocs;
- }
- }
-
public static abstract class DocMap {
private final Bits liveDocs;
@@ -50,17 +38,17 @@ public class MergeState {
this.liveDocs = liveDocs;
}
- public static DocMap build(IndexReaderAndLiveDocs reader) {
- final int maxDoc = reader.reader.maxDoc();
- final int numDeletes = reader.numDeletedDocs;
+ public static DocMap build(AtomicReader reader) {
+ final int maxDoc = reader.maxDoc();
+ final int numDeletes = reader.numDeletedDocs();
final int numDocs = maxDoc - numDeletes;
- assert reader.liveDocs != null || numDeletes == 0;
+ assert reader.getLiveDocs() != null || numDeletes == 0;
if (numDeletes == 0) {
return new NoDelDocMap(maxDoc);
} else if (numDeletes < numDocs) {
- return buildDelCountDocmap(maxDoc, numDeletes, reader.liveDocs, PackedInts.COMPACT);
+ return buildDelCountDocmap(maxDoc, numDeletes, reader.getLiveDocs(), PackedInts.COMPACT);
} else {
- return buildDirectDocMap(maxDoc, numDocs, reader.liveDocs, PackedInts.COMPACT);
+ return buildDirectDocMap(maxDoc, numDocs, reader.getLiveDocs(), PackedInts.COMPACT);
}
}
@@ -197,7 +185,7 @@ public class MergeState {
public SegmentInfo segmentInfo;
public FieldInfos fieldInfos;
- public List<IndexReaderAndLiveDocs> readers; // Readers & liveDocs being merged
+ public List<AtomicReader> readers; // Readers being merged
public DocMap[] docMaps; // Maps docIDs around deletions
public int[] docBase; // New docID base per reader
public CheckAbort checkAbort;
@@ -247,7 +235,7 @@ public class MergeState {
* @lucene.internal */
static final MergeState.CheckAbort NONE = new MergeState.CheckAbort(null, null) {
@Override
- public void work(double units) throws MergePolicy.MergeAbortedException {
+ public void work(double units) {
// do nothing
}
};
Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiDocValues.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiDocValues.java?rev=1363400&r1=1363399&r2=1363400&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiDocValues.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiDocValues.java Thu Jul 19 15:58:54 2012
@@ -28,7 +28,6 @@ import org.apache.lucene.index.SortedByt
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.PagedBytes;
-import org.apache.lucene.util.ReaderUtil;
import org.apache.lucene.util.packed.PackedInts.Reader;
/**
@@ -52,7 +51,7 @@ public class MultiDocValues extends DocV
return reader.normValues(field);
}
- public boolean stopLoadingOnNull(AtomicReader reader, String field) throws IOException {
+ public boolean stopLoadingOnNull(AtomicReader reader, String field) {
// for norms we drop all norms if one leaf reader has no norms and the field is present
FieldInfos fieldInfos = reader.getFieldInfos();
FieldInfo fieldInfo = fieldInfos.fieldInfo(field);
@@ -80,7 +79,7 @@ public class MultiDocValues extends DocV
return reader.docValues(field);
}
- public boolean stopLoadingOnNull(AtomicReader reader, String field) throws IOException {
+ public boolean stopLoadingOnNull(AtomicReader reader, String field) {
return false;
}
}
@@ -438,7 +437,7 @@ public class MultiDocValues extends DocV
ordToOffset = type == Type.BYTES_VAR_SORTED ? new long[2] : null;
}
@Override
- public void consume(BytesRef ref, int ord, long offset) throws IOException {
+ public void consume(BytesRef ref, int ord, long offset) {
pagedBytes.copy(ref);
if (ordToOffset != null) {
if (ord+1 >= ordToOffset.length) {
Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiDocsAndPositionsEnum.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiDocsAndPositionsEnum.java?rev=1363400&r1=1363399&r2=1363400&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiDocsAndPositionsEnum.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiDocsAndPositionsEnum.java Thu Jul 19 15:58:54 2012
@@ -18,7 +18,6 @@ package org.apache.lucene.index;
*/
import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.ReaderSlice;
import java.io.IOException;
@@ -47,7 +46,7 @@ public final class MultiDocsAndPositions
return this.parent == parent;
}
- public MultiDocsAndPositionsEnum reset(final EnumWithSlice[] subs, final int numSubs) throws IOException {
+ public MultiDocsAndPositionsEnum reset(final EnumWithSlice[] subs, final int numSubs) {
this.numSubs = numSubs;
this.subs = new EnumWithSlice[subs.length];
for(int i=0;i<subs.length;i++) {
@@ -56,6 +55,7 @@ public final class MultiDocsAndPositions
this.subs[i].slice = subs[i].slice;
}
upto = -1;
+ doc = -1;
current = null;
return this;
}
@@ -70,6 +70,7 @@ public final class MultiDocsAndPositions
@Override
public int freq() throws IOException {
+ assert current != null;
return current.freq();
}
Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiDocsEnum.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiDocsEnum.java?rev=1363400&r1=1363399&r2=1363400&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiDocsEnum.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiDocsEnum.java Thu Jul 19 15:58:54 2012
@@ -17,7 +17,6 @@ package org.apache.lucene.index;
* limitations under the License.
*/
-import org.apache.lucene.util.ReaderSlice;
import java.io.IOException;
import java.util.Arrays;
@@ -43,7 +42,7 @@ public final class MultiDocsEnum extends
subDocsEnum = new DocsEnum[subReaderCount];
}
- MultiDocsEnum reset(final EnumWithSlice[] subs, final int numSubs) throws IOException {
+ MultiDocsEnum reset(final EnumWithSlice[] subs, final int numSubs) {
this.numSubs = numSubs;
this.subs = new EnumWithSlice[subs.length];
@@ -53,6 +52,7 @@ public final class MultiDocsEnum extends
this.subs[i].slice = subs[i].slice;
}
upto = -1;
+ doc = -1;
current = null;
return this;
}
Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiFields.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiFields.java?rev=1363400&r1=1363399&r2=1363400&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiFields.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiFields.java Thu Jul 19 15:58:54 2012
@@ -27,8 +27,6 @@ import java.util.concurrent.ConcurrentHa
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.MultiBits;
-import org.apache.lucene.util.ReaderSlice;
/**
* Exposes flex API, merged from flex API of sub-segments.
Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiFieldsEnum.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiFieldsEnum.java?rev=1363400&r1=1363399&r2=1363400&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiFieldsEnum.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiFieldsEnum.java Thu Jul 19 15:58:54 2012
@@ -18,7 +18,6 @@ package org.apache.lucene.index;
*/
import org.apache.lucene.util.PriorityQueue;
-import org.apache.lucene.util.ReaderSlice;
import java.io.IOException;
import java.util.List;
@@ -111,7 +110,7 @@ public final class MultiFieldsEnum exte
final int index;
String current;
- public FieldsEnumWithSlice(FieldsEnum fields, ReaderSlice slice, int index) throws IOException {
+ public FieldsEnumWithSlice(FieldsEnum fields, ReaderSlice slice, int index) {
this.slice = slice;
this.index = index;
assert slice.length >= 0: "length=" + slice.length;
Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiReader.java?rev=1363400&r1=1363399&r2=1363400&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiReader.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiReader.java Thu Jul 19 15:58:54 2012
@@ -45,7 +45,7 @@ public class MultiReader extends BaseCom
* <p>Note that all subreaders are closed if this MultiReader is closed.</p>
* @param subReaders set of (sub)readers
*/
- public MultiReader(IndexReader... subReaders) throws IOException {
+ public MultiReader(IndexReader... subReaders) {
this(subReaders, true);
}
@@ -55,7 +55,7 @@ public class MultiReader extends BaseCom
* @param closeSubReaders indicates whether the subreaders should be closed
* when this MultiReader is closed
*/
- public MultiReader(IndexReader[] subReaders, boolean closeSubReaders) throws IOException {
+ public MultiReader(IndexReader[] subReaders, boolean closeSubReaders) {
super(subReaders.clone());
this.closeSubReaders = closeSubReaders;
if (!closeSubReaders) {
Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiTerms.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiTerms.java?rev=1363400&r1=1363399&r2=1363400&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiTerms.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiTerms.java Thu Jul 19 15:58:54 2012
@@ -23,7 +23,6 @@ import java.util.Comparator;
import java.util.List;
import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.ReaderSlice;
import org.apache.lucene.util.automaton.CompiledAutomaton;
@@ -96,7 +95,7 @@ public final class MultiTerms extends Te
}
@Override
- public long size() throws IOException {
+ public long size() {
return -1;
}
Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiTermsEnum.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiTermsEnum.java?rev=1363400&r1=1363399&r2=1363400&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiTermsEnum.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/MultiTermsEnum.java Thu Jul 19 15:58:54 2012
@@ -20,9 +20,6 @@ package org.apache.lucene.index;
import org.apache.lucene.util.PriorityQueue;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.BitsSlice;
-import org.apache.lucene.util.MultiBits;
-import org.apache.lucene.util.ReaderSlice;
import java.io.IOException;
import java.util.Arrays;
@@ -261,12 +258,12 @@ public final class MultiTermsEnum extend
}
@Override
- public void seekExact(long ord) throws IOException {
+ public void seekExact(long ord) {
throw new UnsupportedOperationException();
}
@Override
- public long ord() throws IOException {
+ public long ord() {
throw new UnsupportedOperationException();
}
Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/NoDeletionPolicy.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/NoDeletionPolicy.java?rev=1363400&r1=1363399&r2=1363400&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/NoDeletionPolicy.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/NoDeletionPolicy.java Thu Jul 19 15:58:54 2012
@@ -17,7 +17,6 @@ package org.apache.lucene.index;
* limitations under the License.
*/
-import java.io.IOException;
import java.util.List;
/**
@@ -34,8 +33,8 @@ public final class NoDeletionPolicy impl
// keep private to avoid instantiation
}
- public void onCommit(List<? extends IndexCommit> commits) throws IOException {}
+ public void onCommit(List<? extends IndexCommit> commits) {}
- public void onInit(List<? extends IndexCommit> commits) throws IOException {}
+ public void onInit(List<? extends IndexCommit> commits) {}
}
Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/NoMergePolicy.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/NoMergePolicy.java?rev=1363400&r1=1363399&r2=1363400&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/NoMergePolicy.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/NoMergePolicy.java Thu Jul 19 15:58:54 2012
@@ -17,7 +17,6 @@ package org.apache.lucene.index;
* limitations under the License.
*/
-import java.io.IOException;
import java.util.Map;
/**
@@ -54,17 +53,14 @@ public final class NoMergePolicy extends
public void close() {}
@Override
- public MergeSpecification findMerges(SegmentInfos segmentInfos)
- throws CorruptIndexException, IOException { return null; }
+ public MergeSpecification findMerges(SegmentInfos segmentInfos) { return null; }
@Override
public MergeSpecification findForcedMerges(SegmentInfos segmentInfos,
- int maxSegmentCount, Map<SegmentInfoPerCommit,Boolean> segmentsToMerge)
- throws CorruptIndexException, IOException { return null; }
+ int maxSegmentCount, Map<SegmentInfoPerCommit,Boolean> segmentsToMerge) { return null; }
@Override
- public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos)
- throws CorruptIndexException, IOException { return null; }
+ public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos) { return null; }
@Override
public boolean useCompoundFile(SegmentInfos segments, SegmentInfoPerCommit newSegment) { return useCompoundFile; }
Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/NoMergeScheduler.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/NoMergeScheduler.java?rev=1363400&r1=1363399&r2=1363400&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/NoMergeScheduler.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/NoMergeScheduler.java Thu Jul 19 15:58:54 2012
@@ -17,8 +17,6 @@ package org.apache.lucene.index;
* limitations under the License.
*/
-import java.io.IOException;
-
/**
* A {@link MergeScheduler} which never executes any merges. It is also a
* singleton and can be accessed through {@link NoMergeScheduler#INSTANCE}. Use
@@ -42,5 +40,5 @@ public final class NoMergeScheduler exte
public void close() {}
@Override
- public void merge(IndexWriter writer) throws CorruptIndexException, IOException {}
+ public void merge(IndexWriter writer) {}
}
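
For reference, a sketch of wiring these no-op implementations into a config (assuming the 4.0-era IndexWriterConfig setters):

    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.NoMergePolicy;
    import org.apache.lucene.index.NoMergeScheduler;

    class NoMergeSketch {
      static IndexWriterConfig disableMerging(IndexWriterConfig conf) {
        conf.setMergePolicy(NoMergePolicy.COMPOUND_FILES); // never selects merges
        conf.setMergeScheduler(NoMergeScheduler.INSTANCE); // never runs merges
        return conf;
      }
    }
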
Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/NormsConsumer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/NormsConsumer.java?rev=1363400&r1=1363399&r2=1363400&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/NormsConsumer.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/NormsConsumer.java Thu Jul 19 15:58:54 2012
@@ -87,10 +87,10 @@ final class NormsConsumer extends Invert
}
@Override
- void finishDocument() throws IOException {}
+ void finishDocument() {}
@Override
- void startDocument() throws IOException {}
+ void startDocument() {}
@Override
InvertedDocEndConsumerPerField addField(DocInverterPerField docInverterPerField,
Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/ParallelAtomicReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/ParallelAtomicReader.java?rev=1363400&r1=1363399&r2=1363400&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/ParallelAtomicReader.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/ParallelAtomicReader.java Thu Jul 19 15:58:54 2012
@@ -162,7 +162,7 @@ public final class ParallelAtomicReader
}
@Override
- public String next() throws IOException {
+ public String next() {
if (keys.hasNext()) {
currentField = keys.next();
} else {
@@ -172,7 +172,7 @@ public final class ParallelAtomicReader
}
@Override
- public Terms terms() throws IOException {
+ public Terms terms() {
return fields.terms(currentField);
}
@@ -185,22 +185,22 @@ public final class ParallelAtomicReader
ParallelFields() {
}
- void addField(String fieldName, Terms terms) throws IOException {
+ void addField(String fieldName, Terms terms) {
fields.put(fieldName, terms);
}
@Override
- public FieldsEnum iterator() throws IOException {
+ public FieldsEnum iterator() {
return new ParallelFieldsEnum(this);
}
@Override
- public Terms terms(String field) throws IOException {
+ public Terms terms(String field) {
return fields.get(field);
}
@Override
- public int size() throws IOException {
+ public int size() {
return fields.size();
}
}
@@ -249,7 +249,7 @@ public final class ParallelAtomicReader
}
@Override
- public void document(int docID, StoredFieldVisitor visitor) throws CorruptIndexException, IOException {
+ public void document(int docID, StoredFieldVisitor visitor) throws IOException {
ensureOpen();
for (final AtomicReader reader: storedFieldsReaders) {
reader.document(docID, visitor);
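The FieldsEnum accessors above no longer declare IOException, so enumerating the merged field view needs no per-call handler. A minimal sketch against the 4.x-era Fields/FieldsEnum API, for any AtomicReader (the method name is illustrative):

    import java.io.IOException;
    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.Fields;
    import org.apache.lucene.index.FieldsEnum;
    import org.apache.lucene.index.Terms;

    static void listFields(AtomicReader reader) throws IOException {
      Fields fields = reader.fields();      // may be null: no postings at all
      if (fields == null) {
        return;
      }
      FieldsEnum fe = fields.iterator();    // exception-free after this change
      String field;
      while ((field = fe.next()) != null) { // null field name ends the enumeration
        Terms terms = fe.terms();           // Terms for the current field
        System.out.println(field + " hasTerms=" + (terms != null));
      }
    }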
Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java?rev=1363400&r1=1363399&r2=1363400&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java Thu Jul 19 15:58:54 2012
@@ -28,7 +28,6 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.util.Version;
/**
@@ -106,8 +105,7 @@ public class PersistentSnapshotDeletionP
* IndexWriter.
*/
public PersistentSnapshotDeletionPolicy(IndexDeletionPolicy primary,
- Directory dir, OpenMode mode, Version matchVersion)
- throws CorruptIndexException, LockObtainFailedException, IOException {
+ Directory dir, OpenMode mode, Version matchVersion) throws IOException {
super(primary, null);
// Initialize the index writer over the snapshot directory.
@@ -175,7 +173,7 @@ public class PersistentSnapshotDeletionP
}
/** Closes the index which writes the snapshots to the directory. */
- public void close() throws CorruptIndexException, IOException {
+ public void close() throws IOException {
writer.close();
}
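After this change only IOException remains on the constructor and close(); the dropped LockObtainFailedException clause was redundant anyway, since that exception extends IOException and is still covered at runtime. A construction sketch, assuming a Directory reserved for snapshot metadata and the stock KeepOnlyLastCommitDeletionPolicy as the wrapped primary (both are caller choices, not part of this diff):

    import java.io.IOException;
    import org.apache.lucene.index.IndexWriterConfig.OpenMode;
    import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
    import org.apache.lucene.index.PersistentSnapshotDeletionPolicy;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.util.Version;

    static PersistentSnapshotDeletionPolicy openPolicy(Directory snapshotDir)
        throws IOException {   // narrower clause still covers lock failures
      return new PersistentSnapshotDeletionPolicy(
          new KeepOnlyLastCommitDeletionPolicy(), snapshotDir,
          OpenMode.CREATE_OR_APPEND, Version.LUCENE_40);
    }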
Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SegmentInfo.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SegmentInfo.java?rev=1363400&r1=1363399&r2=1363400&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SegmentInfo.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SegmentInfo.java Thu Jul 19 15:58:54 2012
@@ -157,7 +157,7 @@ public final class SegmentInfo {
* modify it.
*/
- public Set<String> files() throws IOException {
+ public Set<String> files() {
if (setFiles == null) {
throw new IllegalStateException("files were not computed yet");
}
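files() now fails with an unchecked IllegalStateException instead of declaring IOException, so callers drop the checked handler entirely. A caller-side sketch of the narrowed contract (filesOf is an illustrative name):

    import java.util.Set;
    import org.apache.lucene.index.SegmentInfo;

    static Set<String> filesOf(SegmentInfo info) {  // no throws clause needed now
      // IllegalStateException (unchecked) if the file set was never computed;
      // the returned set must not be modified, per the javadoc above
      return info.files();
    }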
Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java?rev=1363400&r1=1363399&r2=1363400&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java Thu Jul 19 15:58:54 2012
@@ -32,6 +32,7 @@ import java.util.Map;
import java.util.Set;
import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.codecs.LiveDocsFormat;
import org.apache.lucene.store.ChecksumIndexInput;
import org.apache.lucene.store.ChecksumIndexOutput;
@@ -41,9 +42,7 @@ import org.apache.lucene.store.IOContext
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.NoSuchDirectoryException;
-import org.apache.lucene.util.CodecUtil;
import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util.StringHelper;
import org.apache.lucene.util.ThreadInterruptedException;
/**
@@ -191,7 +190,7 @@ public final class SegmentInfos implemen
* @param files -- array of file names to check
*/
- public static String getLastCommitSegmentsFileName(String[] files) throws IOException {
+ public static String getLastCommitSegmentsFileName(String[] files) {
return IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
"",
getLastCommitGeneration(files));
@@ -259,7 +258,7 @@ public final class SegmentInfos implemen
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
- public final void read(Directory directory, String segmentFileName) throws CorruptIndexException, IOException {
+ public final void read(Directory directory, String segmentFileName) throws IOException {
boolean success = false;
// Clear any previous segments:
@@ -314,13 +313,13 @@ public final class SegmentInfos implemen
}
}
- public final void read(Directory directory) throws CorruptIndexException, IOException {
+ public final void read(Directory directory) throws IOException {
generation = lastGeneration = -1;
new FindSegmentsFile(directory) {
@Override
- protected Object doBody(String segmentFileName) throws CorruptIndexException, IOException {
+ protected Object doBody(String segmentFileName) throws IOException {
read(directory, segmentFileName);
return null;
}
@@ -483,11 +482,11 @@ public final class SegmentInfos implemen
this.directory = directory;
}
- public Object run() throws CorruptIndexException, IOException {
+ public Object run() throws IOException {
return run(null);
}
- public Object run(IndexCommit commit) throws CorruptIndexException, IOException {
+ public Object run(IndexCommit commit) throws IOException {
if (commit != null) {
if (directory != commit.getDirectory())
throw new IOException("the specified commit does not match the specified Directory");
@@ -693,7 +692,7 @@ public final class SegmentInfos implemen
* during the processing that could have been caused by
* a writer committing.
*/
- protected abstract Object doBody(String segmentFileName) throws CorruptIndexException, IOException;
+ protected abstract Object doBody(String segmentFileName) throws IOException;
}
// Carry over generation numbers from another SegmentInfos
@@ -702,7 +701,7 @@ public final class SegmentInfos implemen
generation = other.generation;
}
- final void rollbackCommit(Directory dir) throws IOException {
+ final void rollbackCommit(Directory dir) {
if (pendingSegnOutput != null) {
try {
pendingSegnOutput.close();
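The FindSegmentsFile retry template keeps its shape after the exception cleanup; doBody and run() now declare plain IOException. A usage sketch mirroring the read(Directory) override shown above (readLatest is an illustrative name):

    import java.io.IOException;
    import org.apache.lucene.index.SegmentInfos;
    import org.apache.lucene.store.Directory;

    static SegmentInfos readLatest(final Directory dir) throws IOException {
      final SegmentInfos sis = new SegmentInfos();
      new SegmentInfos.FindSegmentsFile(dir) {
        @Override
        protected Object doBody(String segmentFileName) throws IOException {
          // re-invoked with a newer segments_N if a concurrent commit races us
          sis.read(dir, segmentFileName);
          return null;
        }
      }.run();
      return sis;
    }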
Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java?rev=1363400&r1=1363399&r2=1363400&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java Thu Jul 19 15:58:54 2012
@@ -34,8 +34,6 @@ import org.apache.lucene.store.IOContext
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.InfoStream;
-import org.apache.lucene.util.ReaderUtil;
-import org.apache.lucene.util.ReaderSlice;
/**
* The SegmentMerger class combines two or more Segments, represented by an IndexReader ({@link #add},
@@ -62,7 +60,7 @@ final class SegmentMerger {
FieldInfos.FieldNumbers fieldNumbers, IOContext context) {
mergeState.segmentInfo = segmentInfo;
mergeState.infoStream = infoStream;
- mergeState.readers = new ArrayList<MergeState.IndexReaderAndLiveDocs>();
+ mergeState.readers = new ArrayList<AtomicReader>();
mergeState.checkAbort = checkAbort;
mergeState.payloadProcessorProvider = payloadProcessorProvider;
directory = dir;
@@ -79,12 +77,12 @@ final class SegmentMerger {
final void add(IndexReader reader) {
for (final AtomicReaderContext ctx : reader.getTopReaderContext().leaves()) {
final AtomicReader r = ctx.reader();
- mergeState.readers.add(new MergeState.IndexReaderAndLiveDocs(r, r.getLiveDocs(), r.numDeletedDocs()));
+ mergeState.readers.add(r);
}
}
- final void add(SegmentReader reader, Bits liveDocs, int delCount) {
- mergeState.readers.add(new MergeState.IndexReaderAndLiveDocs(reader, liveDocs, delCount));
+ final void add(SegmentReader reader) {
+ mergeState.readers.add(reader);
}
/**
@@ -93,7 +91,7 @@ final class SegmentMerger {
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
- final MergeState merge() throws CorruptIndexException, IOException {
+ final MergeState merge() throws IOException {
// NOTE: it's important to add calls to
// checkAbort.work(...) if you make any changes to this
// method that will spend a lot of time. The frequency
@@ -140,14 +138,14 @@ final class SegmentMerger {
// FieldInfos, then we can do a bulk copy of the
// stored fields:
for (int i = 0; i < numReaders; i++) {
- MergeState.IndexReaderAndLiveDocs reader = mergeState.readers.get(i);
+ AtomicReader reader = mergeState.readers.get(i);
// TODO: we may be able to broaden this to
// non-SegmentReaders, since FieldInfos is now
// required? But... this'd also require exposing
// bulk-copy (TVs and stored fields) API in foreign
// readers..
- if (reader.reader instanceof SegmentReader) {
- SegmentReader segmentReader = (SegmentReader) reader.reader;
+ if (reader instanceof SegmentReader) {
+ SegmentReader segmentReader = (SegmentReader) reader;
boolean same = true;
FieldInfos segmentFieldInfos = segmentReader.getFieldInfos();
for (FieldInfo fi : segmentFieldInfos) {
@@ -190,8 +188,7 @@ final class SegmentMerger {
Map<FieldInfo,TypePromoter> docValuesTypes = new HashMap<FieldInfo,TypePromoter>();
Map<FieldInfo,TypePromoter> normValuesTypes = new HashMap<FieldInfo,TypePromoter>();
- for (MergeState.IndexReaderAndLiveDocs readerAndLiveDocs : mergeState.readers) {
- final AtomicReader reader = readerAndLiveDocs.reader;
+ for (AtomicReader reader : mergeState.readers) {
FieldInfos readerFieldInfos = reader.getFieldInfos();
for (FieldInfo fi : readerFieldInfos) {
FieldInfo merged = fieldInfosBuilder.add(fi);
@@ -246,7 +243,7 @@ final class SegmentMerger {
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
- private int mergeFields() throws CorruptIndexException, IOException {
+ private int mergeFields() throws IOException {
final StoredFieldsWriter fieldsWriter = codec.storedFieldsFormat().fieldsWriter(directory, mergeState.segmentInfo, context);
try {
@@ -285,7 +282,7 @@ final class SegmentMerger {
int i = 0;
while(i < mergeState.readers.size()) {
- final MergeState.IndexReaderAndLiveDocs reader = mergeState.readers.get(i);
+ final AtomicReader reader = mergeState.readers.get(i);
mergeState.docBase[i] = docBase;
final MergeState.DocMap docMap = MergeState.DocMap.build(reader);
@@ -293,7 +290,7 @@ final class SegmentMerger {
docBase += docMap.numDocs();
if (mergeState.payloadProcessorProvider != null) {
- mergeState.readerPayloadProcessor[i] = mergeState.payloadProcessorProvider.getReaderProcessor(reader.reader);
+ mergeState.readerPayloadProcessor[i] = mergeState.payloadProcessorProvider.getReaderProcessor(reader);
}
i++;
@@ -302,7 +299,7 @@ final class SegmentMerger {
return docBase;
}
- private final void mergeTerms(SegmentWriteState segmentWriteState) throws CorruptIndexException, IOException {
+ private final void mergeTerms(SegmentWriteState segmentWriteState) throws IOException {
final List<Fields> fields = new ArrayList<Fields>();
final List<ReaderSlice> slices = new ArrayList<ReaderSlice>();
@@ -310,9 +307,9 @@ final class SegmentMerger {
int docBase = 0;
for(int readerIndex=0;readerIndex<mergeState.readers.size();readerIndex++) {
- final MergeState.IndexReaderAndLiveDocs r = mergeState.readers.get(readerIndex);
- final Fields f = r.reader.fields();
- final int maxDoc = r.reader.maxDoc();
+ final AtomicReader reader = mergeState.readers.get(readerIndex);
+ final Fields f = reader.fields();
+ final int maxDoc = reader.maxDoc();
if (f != null) {
slices.add(new ReaderSlice(docBase, maxDoc, readerIndex));
fields.add(f);
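With IndexReaderAndLiveDocs gone, per-reader state is read straight off each AtomicReader, as the rewritten loops above show. A sketch of consuming the new List<AtomicReader> shape (assumes MergeState and its readers field are publicly reachable; the method name is illustrative):

    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.MergeState;
    import org.apache.lucene.util.Bits;

    static void dumpReaderStats(MergeState mergeState) {
      for (AtomicReader reader : mergeState.readers) {
        Bits liveDocs = reader.getLiveDocs();  // null means no deletions
        System.out.println("maxDoc=" + reader.maxDoc()
            + " delCount=" + reader.numDeletedDocs()
            + " hasDeletions=" + (liveDocs != null));
      }
    }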
Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java?rev=1363400&r1=1363399&r2=1363400&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java Thu Jul 19 15:58:54 2012
@@ -89,7 +89,7 @@ public final class SegmentReader extends
// SegmentReader and using the provided in-memory
// liveDocs. Used by IndexWriter to provide a new NRT
// reader:
- SegmentReader(SegmentInfoPerCommit si, SegmentCoreReaders core, Bits liveDocs, int numDocs) throws IOException {
+ SegmentReader(SegmentInfoPerCommit si, SegmentCoreReaders core, Bits liveDocs, int numDocs) {
this.si = si;
this.core = core;
core.incRef();
@@ -131,7 +131,7 @@ public final class SegmentReader extends
}
@Override
- public void document(int docID, StoredFieldVisitor visitor) throws CorruptIndexException, IOException {
+ public void document(int docID, StoredFieldVisitor visitor) throws IOException {
if (docID < 0 || docID >= maxDoc()) {
throw new IllegalArgumentException("docID must be >= 0 and < maxDoc=" + maxDoc() + " (got docID=" + docID + ")");
}
@@ -139,7 +139,7 @@ public final class SegmentReader extends
}
@Override
- public Fields fields() throws IOException {
+ public Fields fields() {
ensureOpen();
return core.fields;
}
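document(int, StoredFieldVisitor) now declares only IOException; an out-of-range docID surfaces as the unchecked IllegalArgumentException shown above. A load sketch using the stock DocumentStoredFieldVisitor (assumed from the 4.x document package; loadDoc is an illustrative name):

    import java.io.IOException;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.DocumentStoredFieldVisitor;
    import org.apache.lucene.index.SegmentReader;

    static Document loadDoc(SegmentReader reader, int docID) throws IOException {
      DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor();
      reader.document(docID, visitor);  // IllegalArgumentException on a bad docID
      return visitor.getDocument();
    }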
Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SerialMergeScheduler.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SerialMergeScheduler.java?rev=1363400&r1=1363399&r2=1363400&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SerialMergeScheduler.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SerialMergeScheduler.java Thu Jul 19 15:58:54 2012
@@ -27,8 +27,7 @@ public class SerialMergeScheduler extend
* "synchronized" so that even if the application is using
* multiple threads, only one merge may run at a time. */
@Override
- synchronized public void merge(IndexWriter writer)
- throws CorruptIndexException, IOException {
+ synchronized public void merge(IndexWriter writer) throws IOException {
while(true) {
MergePolicy.OneMerge merge = writer.getNextMerge();
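The loop above drains writer.getNextMerge() until it returns null, on the calling thread, so at most one merge runs at a time. Selecting this scheduler is one line; a sketch assuming an IndexWriterConfig named conf in scope:

    import org.apache.lucene.index.SerialMergeScheduler;

    // merges run synchronously inside whichever thread triggers them
    conf.setMergeScheduler(new SerialMergeScheduler());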
Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SingleTermsEnum.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SingleTermsEnum.java?rev=1363400&r1=1363399&r2=1363400&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SingleTermsEnum.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SingleTermsEnum.java Thu Jul 19 15:58:54 2012
@@ -17,8 +17,6 @@ package org.apache.lucene.index;
* limitations under the License.
*/
-import java.io.IOException;
-
import org.apache.lucene.search.MultiTermQuery; // javadocs
import org.apache.lucene.util.BytesRef;
@@ -39,7 +37,7 @@ public final class SingleTermsEnum exten
* After calling the constructor the enumeration is already pointing to the term,
* if it exists.
*/
- public SingleTermsEnum(TermsEnum tenum, BytesRef termText) throws IOException {
+ public SingleTermsEnum(TermsEnum tenum, BytesRef termText) {
super(tenum);
singleRef = termText;
setInitialSeekTerm(termText);
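Construction is exception-free after this change; per the javadoc above, the enum starts out positioned on the requested term when it exists. A sketch, assuming the 4.x-era Terms.iterator(TermsEnum reuse) signature (exactly is an illustrative name):

    import java.io.IOException;
    import org.apache.lucene.index.SingleTermsEnum;
    import org.apache.lucene.index.Terms;
    import org.apache.lucene.index.TermsEnum;
    import org.apache.lucene.util.BytesRef;

    static TermsEnum exactly(Terms terms, String text) throws IOException {
      // no try/catch is needed around the constructor any more
      return new SingleTermsEnum(terms.iterator(null), new BytesRef(text));
    }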
Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SlowCompositeReaderWrapper.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SlowCompositeReaderWrapper.java?rev=1363400&r1=1363399&r2=1363400&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SlowCompositeReaderWrapper.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SlowCompositeReaderWrapper.java Thu Jul 19 15:58:54 2012
@@ -75,7 +75,7 @@ public final class SlowCompositeReaderWr
}
@Override
- public Fields fields() throws IOException {
+ public Fields fields() {
ensureOpen();
return fields;
}
@@ -117,7 +117,7 @@ public final class SlowCompositeReaderWr
}
@Override
- public void document(int docID, StoredFieldVisitor visitor) throws CorruptIndexException, IOException {
+ public void document(int docID, StoredFieldVisitor visitor) throws IOException {
ensureOpen();
in.document(docID, visitor);
}
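fields() on the wrapper is now exception-free because the merged view is held in a field, as the hunk above shows. A sketch of obtaining the wrapper, assuming the 4.x-era static wrap(IndexReader) helper (mergedFields is an illustrative name):

    import java.io.IOException;
    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.Fields;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.SlowCompositeReaderWrapper;

    static Fields mergedFields(IndexReader reader) throws IOException {
      // "slow": prefer per-leaf access where possible
      AtomicReader atomic = SlowCompositeReaderWrapper.wrap(reader);
      return atomic.fields();  // no IOException from the wrapper after this change
    }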
Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SortedBytesMergeUtils.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SortedBytesMergeUtils.java?rev=1363400&r1=1363399&r2=1363400&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SortedBytesMergeUtils.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/SortedBytesMergeUtils.java Thu Jul 19 15:58:54 2012
@@ -218,7 +218,7 @@ public final class SortedBytesMergeUtils
current = top[0].current;
}
- private void pushTop() throws IOException {
+ private void pushTop() {
// call next() on each top, and put back into queue
for (int i = 0; i < numTop; i++) {
top[i].current = top[i].next();
Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java?rev=1363400&r1=1363399&r2=1363400&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java Thu Jul 19 15:58:54 2012
@@ -38,7 +38,7 @@ final class StandardDirectoryReader exte
/** called only from static open() methods */
StandardDirectoryReader(Directory directory, AtomicReader[] readers, IndexWriter writer,
- SegmentInfos sis, int termInfosIndexDivisor, boolean applyAllDeletes) throws IOException {
+ SegmentInfos sis, int termInfosIndexDivisor, boolean applyAllDeletes) {
super(directory, readers);
this.writer = writer;
this.segmentInfos = sis;
@@ -48,10 +48,10 @@ final class StandardDirectoryReader exte
/** called from DirectoryReader.open(...) methods */
static DirectoryReader open(final Directory directory, final IndexCommit commit,
- final int termInfosIndexDivisor) throws CorruptIndexException, IOException {
+ final int termInfosIndexDivisor) throws IOException {
return (DirectoryReader) new SegmentInfos.FindSegmentsFile(directory) {
@Override
- protected Object doBody(String segmentFileName) throws CorruptIndexException, IOException {
+ protected Object doBody(String segmentFileName) throws IOException {
SegmentInfos sis = new SegmentInfos();
sis.read(directory, segmentFileName);
final SegmentReader[] readers = new SegmentReader[sis.size()];
@@ -226,12 +226,12 @@ final class StandardDirectoryReader exte
}
@Override
- protected DirectoryReader doOpenIfChanged() throws CorruptIndexException, IOException {
+ protected DirectoryReader doOpenIfChanged() throws IOException {
return doOpenIfChanged(null);
}
@Override
- protected DirectoryReader doOpenIfChanged(final IndexCommit commit) throws CorruptIndexException, IOException {
+ protected DirectoryReader doOpenIfChanged(final IndexCommit commit) throws IOException {
ensureOpen();
// If we were obtained by writer.getReader(), re-ask the
@@ -244,7 +244,7 @@ final class StandardDirectoryReader exte
}
@Override
- protected DirectoryReader doOpenIfChanged(IndexWriter writer, boolean applyAllDeletes) throws CorruptIndexException, IOException {
+ protected DirectoryReader doOpenIfChanged(IndexWriter writer, boolean applyAllDeletes) throws IOException {
ensureOpen();
if (writer == this.writer && applyAllDeletes == this.applyAllDeletes) {
return doOpenFromWriter(null);
@@ -253,7 +253,7 @@ final class StandardDirectoryReader exte
}
}
- private DirectoryReader doOpenFromWriter(IndexCommit commit) throws CorruptIndexException, IOException {
+ private DirectoryReader doOpenFromWriter(IndexCommit commit) throws IOException {
if (commit != null) {
throw new IllegalArgumentException("a reader obtained from IndexWriter.getReader() cannot currently accept a commit");
}
@@ -273,7 +273,7 @@ final class StandardDirectoryReader exte
return reader;
}
- private synchronized DirectoryReader doOpenNoWriter(IndexCommit commit) throws CorruptIndexException, IOException {
+ private synchronized DirectoryReader doOpenNoWriter(IndexCommit commit) throws IOException {
if (commit == null) {
if (isCurrent()) {
@@ -290,7 +290,7 @@ final class StandardDirectoryReader exte
return (DirectoryReader) new SegmentInfos.FindSegmentsFile(directory) {
@Override
- protected Object doBody(String segmentFileName) throws CorruptIndexException, IOException {
+ protected Object doBody(String segmentFileName) throws IOException {
final SegmentInfos infos = new SegmentInfos();
infos.read(directory, segmentFileName);
return doOpenIfChanged(infos, null);
@@ -298,7 +298,7 @@ final class StandardDirectoryReader exte
}.run(commit);
}
- synchronized DirectoryReader doOpenIfChanged(SegmentInfos infos, IndexWriter writer) throws CorruptIndexException, IOException {
+ synchronized DirectoryReader doOpenIfChanged(SegmentInfos infos, IndexWriter writer) throws IOException {
return StandardDirectoryReader.open(directory, writer, infos, getSequentialSubReaders(), termInfosIndexDivisor);
}
@@ -309,7 +309,7 @@ final class StandardDirectoryReader exte
}
@Override
- public boolean isCurrent() throws CorruptIndexException, IOException {
+ public boolean isCurrent() throws IOException {
ensureOpen();
if (writer == null || writer.isClosed()) {
// Fully read the segments file: this ensures that it's
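These protected doOpenIfChanged variants back the public DirectoryReader.openIfChanged API, which returns null when nothing has changed. The standard refresh idiom (refresh is an illustrative name):

    import java.io.IOException;
    import org.apache.lucene.index.DirectoryReader;

    static DirectoryReader refresh(DirectoryReader reader) throws IOException {
      DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
      if (newReader == null) {
        return reader;   // still current; keep the old reader
      }
      reader.close();    // the caller owns both readers
      return newReader;
    }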
Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/StoredFieldsConsumer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/StoredFieldsConsumer.java?rev=1363400&r1=1363399&r2=1363400&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/StoredFieldsConsumer.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/StoredFieldsConsumer.java Thu Jul 19 15:58:54 2012
@@ -126,7 +126,7 @@ final class StoredFieldsConsumer {
assert docWriter.writer.testPoint("StoredFieldsWriter.finishDocument end");
}
- public void addField(IndexableField field, FieldInfo fieldInfo) throws IOException {
+ public void addField(IndexableField field, FieldInfo fieldInfo) {
if (numStoredFields == storedFields.length) {
int newSize = ArrayUtil.oversize(numStoredFields + 1, RamUsageEstimator.NUM_BYTES_OBJECT_REF);
IndexableField[] newArray = new IndexableField[newSize];
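addField above grows its buffer with the standard ArrayUtil.oversize idiom rather than doubling by hand. A standalone sketch of that idiom (grow is an illustrative name, not part of this class):

    import org.apache.lucene.index.IndexableField;
    import org.apache.lucene.util.ArrayUtil;
    import org.apache.lucene.util.RamUsageEstimator;

    static IndexableField[] grow(IndexableField[] arr, int minSize) {
      if (arr.length >= minSize) {
        return arr;
      }
      // over-allocate to amortize future copies, sized by object-reference width
      int newSize = ArrayUtil.oversize(minSize, RamUsageEstimator.NUM_BYTES_OBJECT_REF);
      IndexableField[] newArr = new IndexableField[newSize];
      System.arraycopy(arr, 0, newArr, 0, arr.length);
      return newArr;
    }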
Modified: lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/TermVectorsConsumer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/TermVectorsConsumer.java?rev=1363400&r1=1363399&r2=1363400&view=diff
==============================================================================
--- lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/TermVectorsConsumer.java (original)
+++ lucene/dev/branches/pforcodec_3892/lucene/core/src/java/org/apache/lucene/index/TermVectorsConsumer.java Thu Jul 19 15:58:54 2012
@@ -158,7 +158,7 @@ final class TermVectorsConsumer extends
}
@Override
- void startDocument() throws IOException {
+ void startDocument() {
assert clearLastVectorFieldName();
reset();
}