Posted to commits@lucene.apache.org by ro...@apache.org on 2014/12/08 16:41:03 UTC

svn commit: r1643845 [1/2] - in /lucene/dev/branches/lucene2878: ./ lucene/ lucene/analysis/ lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/ lucene/core/ lucene/core/src/java/org...

Author: romseygeek
Date: Mon Dec  8 15:41:02 2014
New Revision: 1643845

URL: http://svn.apache.org/r1643845
Log:
Merge trunk

Added:
    lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/AbortingException.java
      - copied unchanged from r1643844, lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/index/AbortingException.java
    lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50StoredFieldsFormatHighCompression.java
      - copied unchanged from r1643844, lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50StoredFieldsFormatHighCompression.java
    lucene/dev/branches/lucene2878/lucene/test-framework/src/java/org/apache/lucene/index/SuppressingConcurrentMergeScheduler.java
      - copied unchanged from r1643844, lucene/dev/trunk/lucene/test-framework/src/java/org/apache/lucene/index/SuppressingConcurrentMergeScheduler.java
Removed:
    lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterAbort.java
Modified:
    lucene/dev/branches/lucene2878/   (props changed)
    lucene/dev/branches/lucene2878/lucene/   (props changed)
    lucene/dev/branches/lucene2878/lucene/CHANGES.txt   (contents, props changed)
    lucene/dev/branches/lucene2878/lucene/analysis/   (props changed)
    lucene/dev/branches/lucene2878/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUNormalizer2CharFilter.java
    lucene/dev/branches/lucene2878/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2CharFilter.java
    lucene/dev/branches/lucene2878/lucene/core/   (props changed)
    lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50Codec.java
    lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50StoredFieldsFormat.java
    lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java
    lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java
    lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/DocConsumer.java
    lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java
    lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterFlushControl.java
    lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
    lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java
    lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
    lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java
    lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/MergePolicy.java
    lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/codecs/compressing/TestCompressingStoredFieldsFormat.java
    lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
    lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
    lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java
    lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
    lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
    lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions2.java
    lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
    lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java
    lucene/dev/branches/lucene2878/lucene/sandbox/   (props changed)
    lucene/dev/branches/lucene2878/lucene/sandbox/src/test/org/apache/lucene/codecs/idversion/TestIDVersionPostingsFormat.java
    lucene/dev/branches/lucene2878/lucene/spatial/   (props changed)
    lucene/dev/branches/lucene2878/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/NumberRangePrefixTree.java
    lucene/dev/branches/lucene2878/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/DateNRStrategyTest.java
    lucene/dev/branches/lucene2878/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/tree/DateRangePrefixTreeTest.java
    lucene/dev/branches/lucene2878/lucene/test-framework/   (props changed)
    lucene/dev/branches/lucene2878/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleSetupAndRestoreClassEnv.java
    lucene/dev/branches/lucene2878/solr/   (props changed)
    lucene/dev/branches/lucene2878/solr/CHANGES.txt   (contents, props changed)
    lucene/dev/branches/lucene2878/solr/contrib/   (props changed)
    lucene/dev/branches/lucene2878/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessorBase.java
    lucene/dev/branches/lucene2878/solr/contrib/langid/src/java/org/apache/solr/update/processor/LangDetectLanguageIdentifierUpdateProcessor.java
    lucene/dev/branches/lucene2878/solr/contrib/langid/src/java/org/apache/solr/update/processor/LangIdParams.java
    lucene/dev/branches/lucene2878/solr/contrib/langid/src/java/org/apache/solr/update/processor/LanguageIdentifierUpdateProcessor.java
    lucene/dev/branches/lucene2878/solr/contrib/langid/src/java/org/apache/solr/update/processor/TikaLanguageIdentifierUpdateProcessor.java
    lucene/dev/branches/lucene2878/solr/contrib/langid/src/test/org/apache/solr/update/processor/TikaLanguageIdentifierUpdateProcessorFactoryTest.java
    lucene/dev/branches/lucene2878/solr/contrib/morphlines-core/src/test-files/solr/collection1/conf/solrconfig.xml
    lucene/dev/branches/lucene2878/solr/contrib/morphlines-core/src/test-files/solr/minimr/conf/solrconfig.xml
    lucene/dev/branches/lucene2878/solr/contrib/morphlines-core/src/test-files/solr/mrunit/conf/solrconfig.xml
    lucene/dev/branches/lucene2878/solr/contrib/morphlines-core/src/test-files/solr/solrcelltest/collection1/conf/solrconfig.xml
    lucene/dev/branches/lucene2878/solr/contrib/morphlines-core/src/test-files/solr/solrcloud/conf/solrconfig.xml

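For context on the Added list: AbortingException is, in outline, a checked
wrapper that carries the real cause of a failure severe enough that the
in-flight segment must be aborted. A minimal sketch of the idea (not the
verbatim trunk file):

    // Sketch: package-private checked exception wrapping the original
    // throwable, so callers can tell aborting failures from ordinary ones.
    class AbortingException extends Exception {
      AbortingException(Throwable cause) {
        super(cause);
      }
    }
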
Modified: lucene/dev/branches/lucene2878/lucene/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/CHANGES.txt?rev=1643845&r1=1643844&r2=1643845&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/CHANGES.txt (original)
+++ lucene/dev/branches/lucene2878/lucene/CHANGES.txt Mon Dec  8 15:41:02 2014
@@ -123,6 +123,9 @@ New Features
   queries provided that term vectors with positions, offsets, and payloads are present. This is the
   only highlighter that can highlight such queries accurately. (David Smiley)
 
+* LUCENE-5914: Add an option to Lucene50Codec to support either BEST_SPEED
+  or BEST_COMPRESSION for stored fields. (Adrien Grand, Robert Muir)
+
 Optimizations
 
 * LUCENE-5960: Use a more efficient bitset, not a Set<Integer>, to
@@ -351,6 +354,13 @@ Bug Fixes
 
 * LUCENE-6075: Don't overflow int in SimpleRateLimiter (Boaz Leskes
   via Mike McCandless)
+
+* LUCENE-5987: IndexWriter will now forcefully close itself on an
+  aborting exception (an exception that would otherwise cause silent
+  data loss).  (Robert Muir, Mike McCandless)
+
+* LUCENE-6094: Allow IW.rollback to stop ConcurrentMergeScheduler even
+  when it's stalling because there are too many merges. (Mike McCandless)
   
 Documentation
 

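A usage sketch for the LUCENE-5914 entry above, assuming the trunk-era
IndexWriterConfig(Analyzer) constructor:

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.codecs.lucene50.Lucene50Codec;
    import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode;
    import org.apache.lucene.index.IndexWriterConfig;

    static IndexWriterConfig highCompressionConfig(Analyzer analyzer) {
      IndexWriterConfig iwc = new IndexWriterConfig(analyzer);
      // BEST_SPEED (LZ4) is the default; BEST_COMPRESSION trades some
      // retrieval speed for a better stored-fields compression ratio.
      iwc.setCodec(new Lucene50Codec(Mode.BEST_COMPRESSION));
      return iwc;
    }
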
Modified: lucene/dev/branches/lucene2878/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUNormalizer2CharFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUNormalizer2CharFilter.java?rev=1643845&r1=1643844&r2=1643845&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUNormalizer2CharFilter.java (original)
+++ lucene/dev/branches/lucene2878/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUNormalizer2CharFilter.java Mon Dec  8 15:41:02 2014
@@ -19,6 +19,7 @@ package org.apache.lucene.analysis.icu;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.util.Objects;
 
 import org.apache.lucene.analysis.charfilter.BaseCharFilter;
 
@@ -29,8 +30,6 @@ import com.ibm.icu.text.Normalizer2;
  */
 public final class ICUNormalizer2CharFilter extends BaseCharFilter {
 
-  private static final int IO_BUFFER_SIZE = 128;
-
   private final Normalizer2 normalizer;
   private final StringBuilder inputBuffer = new StringBuilder();
   private final StringBuilder resultBuffer = new StringBuilder();
@@ -55,11 +54,14 @@ public final class ICUNormalizer2CharFil
    * @param normalizer normalizer to use
    */
   public ICUNormalizer2CharFilter(Reader in, Normalizer2 normalizer) {
+    this(in, normalizer, 128);
+  }
+  
+  // for testing ONLY
+  ICUNormalizer2CharFilter(Reader in, Normalizer2 normalizer, int bufferSize) {
     super(in);
-    if (normalizer == null) {
-      throw new NullPointerException("normalizer == null");
-    }
-    this.normalizer = normalizer;
+    this.normalizer = Objects.requireNonNull(normalizer);
+    this.tmpBuffer = new char[bufferSize];
   }
 
   @Override
@@ -92,7 +94,7 @@ public final class ICUNormalizer2CharFil
     return -1;
   }
 
-  private final char[] tmpBuffer = new char[IO_BUFFER_SIZE];
+  private final char[] tmpBuffer;
 
   private int readInputToBuffer() throws IOException {
     final int len = input.read(tmpBuffer);

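The public API is unchanged by this patch: the new three-argument
constructor is package-private, only so tests can shrink the I/O buffer.
Typical wiring still looks like this sketch:

    import java.io.Reader;
    import java.io.StringReader;

    import org.apache.lucene.analysis.icu.ICUNormalizer2CharFilter;
    import com.ibm.icu.text.Normalizer2;

    // Wrap any Reader; text is normalized incrementally as it is read.
    static Reader nfkcCaseFold(String input) {
      return new ICUNormalizer2CharFilter(
          new StringReader(input),
          Normalizer2.getInstance(null, "nfkc_cf", Normalizer2.Mode.COMPOSE));
    }
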
Modified: lucene/dev/branches/lucene2878/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2CharFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2CharFilter.java?rev=1643845&r1=1643844&r2=1643845&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2CharFilter.java (original)
+++ lucene/dev/branches/lucene2878/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2CharFilter.java Mon Dec  8 15:41:02 2014
@@ -105,7 +105,7 @@ public class TestICUNormalizer2CharFilte
     );
   }
 
-  public void doTestMode(final Normalizer2 normalizer, int maxLength, int iterations) throws IOException {
+  public void doTestMode(final Normalizer2 normalizer, int maxLength, int iterations, int bufferSize) throws IOException {
     Analyzer a = new Analyzer() {
       @Override
       protected TokenStreamComponents createComponents(String fieldName) {
@@ -114,7 +114,7 @@ public class TestICUNormalizer2CharFilte
 
       @Override
       protected Reader initReader(String fieldName, Reader reader) {
-        return new ICUNormalizer2CharFilter(reader, normalizer);
+        return new ICUNormalizer2CharFilter(reader, normalizer, bufferSize);
       }
     };
 
@@ -132,43 +132,43 @@ public class TestICUNormalizer2CharFilte
   }
 
   public void testNFC() throws Exception {
-    doTestMode(Normalizer2.getInstance(null, "nfc", Normalizer2.Mode.COMPOSE), 20, RANDOM_MULTIPLIER*1000);
+    doTestMode(Normalizer2.getInstance(null, "nfc", Normalizer2.Mode.COMPOSE), 20, RANDOM_MULTIPLIER*1000, 128);
   }
   
   public void testNFCHuge() throws Exception {
-    doTestMode(Normalizer2.getInstance(null, "nfc", Normalizer2.Mode.COMPOSE), 8192, RANDOM_MULTIPLIER*100);
+    doTestMode(Normalizer2.getInstance(null, "nfc", Normalizer2.Mode.COMPOSE), 256, RANDOM_MULTIPLIER*500, 16);
   }
 
   public void testNFD() throws Exception {
-    doTestMode(Normalizer2.getInstance(null, "nfc", Normalizer2.Mode.DECOMPOSE), 20, RANDOM_MULTIPLIER*1000);
+    doTestMode(Normalizer2.getInstance(null, "nfc", Normalizer2.Mode.DECOMPOSE), 20, RANDOM_MULTIPLIER*1000, 128);
   }
   
   public void testNFDHuge() throws Exception {
-    doTestMode(Normalizer2.getInstance(null, "nfc", Normalizer2.Mode.DECOMPOSE), 8192, RANDOM_MULTIPLIER*100);
+    doTestMode(Normalizer2.getInstance(null, "nfc", Normalizer2.Mode.DECOMPOSE), 256, RANDOM_MULTIPLIER*500, 16);
   }
 
   public void testNFKC() throws Exception {
-    doTestMode(Normalizer2.getInstance(null, "nfkc", Normalizer2.Mode.COMPOSE), 20, RANDOM_MULTIPLIER*1000);
+    doTestMode(Normalizer2.getInstance(null, "nfkc", Normalizer2.Mode.COMPOSE), 20, RANDOM_MULTIPLIER*1000, 128);
   }
   
   public void testNFKCHuge() throws Exception {
-    doTestMode(Normalizer2.getInstance(null, "nfkc", Normalizer2.Mode.COMPOSE), 8192, RANDOM_MULTIPLIER*100);
+    doTestMode(Normalizer2.getInstance(null, "nfkc", Normalizer2.Mode.COMPOSE), 256, RANDOM_MULTIPLIER*500, 16);
   }
 
   public void testNFKD() throws Exception {
-    doTestMode(Normalizer2.getInstance(null, "nfkc", Normalizer2.Mode.DECOMPOSE), 20, RANDOM_MULTIPLIER*1000);
+    doTestMode(Normalizer2.getInstance(null, "nfkc", Normalizer2.Mode.DECOMPOSE), 20, RANDOM_MULTIPLIER*1000, 128);
   }
   
   public void testNFKDHuge() throws Exception {
-    doTestMode(Normalizer2.getInstance(null, "nfkc", Normalizer2.Mode.DECOMPOSE), 8192, RANDOM_MULTIPLIER*100);
+    doTestMode(Normalizer2.getInstance(null, "nfkc", Normalizer2.Mode.DECOMPOSE), 256, RANDOM_MULTIPLIER*500, 16);
   }
 
   public void testNFKC_CF() throws Exception {
-    doTestMode(Normalizer2.getInstance(null, "nfkc_cf", Normalizer2.Mode.COMPOSE), 20, RANDOM_MULTIPLIER*1000);
+    doTestMode(Normalizer2.getInstance(null, "nfkc_cf", Normalizer2.Mode.COMPOSE), 20, RANDOM_MULTIPLIER*1000, 128);
   }
   
   public void testNFKC_CFHuge() throws Exception {
-    doTestMode(Normalizer2.getInstance(null, "nfkc_cf", Normalizer2.Mode.COMPOSE), 8192, RANDOM_MULTIPLIER*100);
+    doTestMode(Normalizer2.getInstance(null, "nfkc_cf", Normalizer2.Mode.COMPOSE), 256, RANDOM_MULTIPLIER*500, 16);
   }
 
   public void testRandomStrings() throws IOException {
@@ -186,7 +186,7 @@ public class TestICUNormalizer2CharFilte
     };
     checkRandomData(random(), a, 1000*RANDOM_MULTIPLIER);
     // huge strings
-    checkRandomData(random(), a, 100*RANDOM_MULTIPLIER, 8192);
+    checkRandomData(random(), a, 25*RANDOM_MULTIPLIER, 8192);
 
     // nfkd
     a = new Analyzer() {
@@ -202,7 +202,7 @@ public class TestICUNormalizer2CharFilte
     };
     checkRandomData(random(), a, 1000*RANDOM_MULTIPLIER);
     // huge strings
-    checkRandomData(random(), a, 100*RANDOM_MULTIPLIER, 8192);
+    checkRandomData(random(), a, 25*RANDOM_MULTIPLIER, 8192);
   }
   
   public void testCuriousString() throws Exception {
@@ -408,7 +408,7 @@ public class TestICUNormalizer2CharFilte
         return new ICUNormalizer2CharFilter(reader, Normalizer2.getInstance(null, "nfkc_cf", Normalizer2.Mode.COMPOSE));
       }
     };
-    for (int i = 0; i < 1000; i++) {
+    for (int i = 0; i < 25; i++) {
       checkAnalysisConsistency(random(), a, false, text);
     }
   }

Modified: lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50Codec.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50Codec.java?rev=1643845&r1=1643844&r2=1643845&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50Codec.java (original)
+++ lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50Codec.java Mon Dec  8 15:41:02 2014
@@ -17,6 +17,8 @@ package org.apache.lucene.codecs.lucene5
  * limitations under the License.
  */
 
+import java.util.Objects;
+
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.CompoundFormat;
 import org.apache.lucene.codecs.DocValuesFormat;
@@ -28,6 +30,7 @@ import org.apache.lucene.codecs.Postings
 import org.apache.lucene.codecs.SegmentInfoFormat;
 import org.apache.lucene.codecs.StoredFieldsFormat;
 import org.apache.lucene.codecs.TermVectorsFormat;
+import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode;
 import org.apache.lucene.codecs.perfield.PerFieldDocValuesFormat;
 import org.apache.lucene.codecs.perfield.PerFieldPostingsFormat;
 
@@ -42,7 +45,6 @@ import org.apache.lucene.codecs.perfield
  * @lucene.experimental
  */
 public class Lucene50Codec extends Codec {
-  private final StoredFieldsFormat fieldsFormat = new Lucene50StoredFieldsFormat();
   private final TermVectorsFormat vectorsFormat = new Lucene50TermVectorsFormat();
   private final FieldInfosFormat fieldInfosFormat = new Lucene50FieldInfosFormat();
   private final SegmentInfoFormat segmentInfosFormat = new Lucene50SegmentInfoFormat();
@@ -62,15 +64,30 @@ public class Lucene50Codec extends Codec
       return Lucene50Codec.this.getDocValuesFormatForField(field);
     }
   };
+  
+  private final StoredFieldsFormat storedFieldsFormat;
 
-  /** Sole constructor. */
+  /** 
+   * Instantiates a new codec.
+   */
   public Lucene50Codec() {
+    this(Mode.BEST_SPEED);
+  }
+  
+  /** 
+   * Instantiates a new codec, specifying the stored fields compression
+   * mode to use.
+   * @param mode stored fields compression mode to use for newly 
+   *             flushed/merged segments.
+   */
+  public Lucene50Codec(Mode mode) {
     super("Lucene50");
+    this.storedFieldsFormat = new Lucene50StoredFieldsFormat(Objects.requireNonNull(mode));
   }
   
   @Override
   public final StoredFieldsFormat storedFieldsFormat() {
-    return fieldsFormat;
+    return storedFieldsFormat;
   }
   
   @Override
@@ -106,7 +123,11 @@ public class Lucene50Codec extends Codec
   /** Returns the postings format that should be used for writing 
    *  new segments of <code>field</code>.
    *  
-   *  The default implementation always returns "Lucene50"
+   *  The default implementation always returns "Lucene50".
+   *  <p>
+   *  <b>WARNING:</b> if you subclass, you are responsible for index 
+   *  backwards compatibility: future versions of Lucene are only 
+   *  guaranteed to be able to read the default implementation. 
    */
   public PostingsFormat getPostingsFormatForField(String field) {
     return defaultFormat;
@@ -115,7 +136,11 @@ public class Lucene50Codec extends Codec
   /** Returns the docvalues format that should be used for writing 
    *  new segments of <code>field</code>.
    *  
-   *  The default implementation always returns "Lucene50"
+   *  The default implementation always returns "Lucene50".
+   *  <p>
+   *  <b>WARNING:</b> if you subclass, you are responsible for index 
+   *  backwards compatibility: future versions of Lucene are only 
+   *  guaranteed to be able to read the default implementation. 
    */
   public DocValuesFormat getDocValuesFormatForField(String field) {
     return defaultDVFormat;

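The strengthened javadoc warnings above apply as soon as either per-field
hook is overridden. A hypothetical subclass (the field name and format
choice are illustrative only; "Memory" assumes the lucene-codecs SPI is on
the classpath):

    import org.apache.lucene.codecs.PostingsFormat;
    import org.apache.lucene.codecs.lucene50.Lucene50Codec;

    // Illustrative only: route one field to a non-default postings format.
    // Per the warning, the subclasser then owns index back-compat.
    public class PerFieldRoutingCodec extends Lucene50Codec {
      @Override
      public PostingsFormat getPostingsFormatForField(String field) {
        if ("id".equals(field)) {
          return PostingsFormat.forName("Memory");
        }
        return super.getPostingsFormatForField(field);
      }
    }
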
Modified: lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50StoredFieldsFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50StoredFieldsFormat.java?rev=1643845&r1=1643844&r2=1643845&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50StoredFieldsFormat.java (original)
+++ lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50StoredFieldsFormat.java Mon Dec  8 15:41:02 2014
@@ -17,26 +17,46 @@ package org.apache.lucene.codecs.lucene5
  * limitations under the License.
  */
 
+import java.io.IOException;
+import java.util.Objects;
+
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.codecs.StoredFieldsFormat;
+import org.apache.lucene.codecs.StoredFieldsReader;
+import org.apache.lucene.codecs.StoredFieldsWriter;
 import org.apache.lucene.codecs.compressing.CompressingStoredFieldsFormat;
 import org.apache.lucene.codecs.compressing.CompressingStoredFieldsIndexWriter;
 import org.apache.lucene.codecs.compressing.CompressionMode;
+import org.apache.lucene.index.FieldInfos;
+import org.apache.lucene.index.SegmentInfo;
 import org.apache.lucene.index.StoredFieldVisitor;
 import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IOContext;
 import org.apache.lucene.util.packed.PackedInts;
 
 /**
  * Lucene 5.0 stored fields format.
  *
  * <p><b>Principle</b></p>
- * <p>This {@link StoredFieldsFormat} compresses blocks of 16KB of documents in
+ * <p>This {@link StoredFieldsFormat} compresses blocks of documents in
  * order to improve the compression ratio compared to document-level
  * compression. It uses the <a href="http://code.google.com/p/lz4/">LZ4</a>
- * compression algorithm, which is fast to compress and very fast to decompress
- * data. Although the compression method that is used focuses more on speed
- * than on compression ratio, it should provide interesting compression ratios
- * for redundant inputs (such as log files, HTML or plain text).</p>
+ * compression algorithm by default in 16KB blocks, which is fast to compress 
+ * and very fast to decompress data. Although the default compression method 
+ * that is used ({@link Mode#BEST_SPEED BEST_SPEED}) focuses more on speed than on 
+ * compression ratio, it should provide interesting compression ratios
+ * for redundant inputs (such as log files, HTML or plain text). For higher
+ * compression, you can choose {@link Mode#BEST_COMPRESSION BEST_COMPRESSION}, which uses 
+ * the <a href="http://en.wikipedia.org/wiki/DEFLATE">DEFLATE</a> algorithm with 24KB blocks 
+ * for a better ratio at the expense of slower performance. 
+ * These two options can be configured like this: </p>
+ * <pre class="prettyprint">
+ *   // the default: for high performance
+ *   indexWriterConfig.setCodec(new Lucene50Codec(Mode.BEST_SPEED));
+ *   // instead for higher compression (but slower):
+ *   // indexWriterConfig.setCodec(new Lucene50Codec(Mode.BEST_COMPRESSION));
+ * </pre>
  * <p><b>File formats</b></p>
  * <p>Stored fields are represented by two files:</p>
  * <ol>
@@ -114,11 +134,58 @@ import org.apache.lucene.util.packed.Pac
  * larger than (<tt>2<sup>31</sup> - 2<sup>14</sup></tt>) bytes.</p>
  * @lucene.experimental
  */
-public final class Lucene50StoredFieldsFormat extends CompressingStoredFieldsFormat {
-
-  /** Sole constructor. */
+public final class Lucene50StoredFieldsFormat extends StoredFieldsFormat {
+  
+  /** Configuration option for stored fields. */
+  public static enum Mode {
+    /** Trade compression ratio for retrieval speed. */
+    BEST_SPEED,
+    /** Trade retrieval speed for compression ratio. */
+    BEST_COMPRESSION
+  }
+  
+  /** Attribute key for compression mode. */
+  public static final String MODE_KEY = Lucene50StoredFieldsFormat.class.getSimpleName() + ".mode";
+  
+  final Mode mode;
+  
+  /** Stored fields format with default options */
   public Lucene50StoredFieldsFormat() {
-    super("Lucene50StoredFields", CompressionMode.FAST, 1 << 14, 128);
+    this(Mode.BEST_SPEED);
+  }
+  
+  /** Stored fields format with specified mode */
+  public Lucene50StoredFieldsFormat(Mode mode) {
+    this.mode = Objects.requireNonNull(mode);
   }
 
+  @Override
+  public StoredFieldsReader fieldsReader(Directory directory, SegmentInfo si, FieldInfos fn, IOContext context) throws IOException {
+    String value = si.getAttribute(MODE_KEY);
+    if (value == null) {
+      throw new IllegalStateException("missing value for " + MODE_KEY + " for segment: " + si.name);
+    }
+    Mode mode = Mode.valueOf(value);
+    return impl(mode).fieldsReader(directory, si, fn, context);
+  }
+
+  @Override
+  public StoredFieldsWriter fieldsWriter(Directory directory, SegmentInfo si, IOContext context) throws IOException {
+    String previous = si.putAttribute(MODE_KEY, mode.name());
+    if (previous != null) {
+      throw new IllegalStateException("found existing value for " + MODE_KEY + " for segment: " + si.name +
+                                      ": old=" + previous + ", new=" + mode.name());
+    }
+    return impl(mode).fieldsWriter(directory, si, context);
+  }
+  
+  StoredFieldsFormat impl(Mode mode) {
+    switch (mode) {
+      case BEST_SPEED: 
+        return new CompressingStoredFieldsFormat("Lucene50StoredFieldsFast", CompressionMode.FAST, 1 << 14, 128);
+      case BEST_COMPRESSION: 
+        return new CompressingStoredFieldsFormat("Lucene50StoredFieldsHigh", CompressionMode.HIGH_COMPRESSION, 24576, 512);
+      default: throw new AssertionError();
+    }
+  }
 }

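Note how the mode round-trips: fieldsWriter stamps MODE_KEY into the
per-segment attributes and fieldsReader resolves it back, so one index may
freely mix BEST_SPEED and BEST_COMPRESSION segments. A sketch of recovering
the mode from a SegmentInfo, using only calls the patch itself makes:

    import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat;
    import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode;
    import org.apache.lucene.index.SegmentInfo;

    static Mode modeOf(SegmentInfo si) {
      String value = si.getAttribute(Lucene50StoredFieldsFormat.MODE_KEY);
      if (value == null) {
        // mirrors fieldsReader above: a written segment must carry the key
        throw new IllegalStateException("missing " + Lucene50StoredFieldsFormat.MODE_KEY);
      }
      return Mode.valueOf(value);
    }
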
Modified: lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java?rev=1643845&r1=1643844&r2=1643845&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java (original)
+++ lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java Mon Dec  8 15:41:02 2014
@@ -300,8 +300,11 @@ public class ConcurrentMergeScheduler ex
   protected synchronized int mergeThreadCount() {
     int count = 0;
     for (MergeThread mt : mergeThreads) {
-      if (mt.isAlive() && mt.getCurrentMerge() != null) {
-        count++;
+      if (mt.isAlive()) {
+        MergePolicy.OneMerge merge = mt.getCurrentMerge();
+        if (merge != null && merge.isAborted() == false) {
+          count++;
+        }
       }
     }
     return count;
@@ -399,7 +402,8 @@ public class ConcurrentMergeScheduler ex
         message("    too many merges; stalling...");
       }
       try {
-        wait();
+        // Only wait 0.25 seconds, so if all merges are aborted (by IW.rollback) we notice:
+        wait(250);
       } catch (InterruptedException ie) {
         throw new ThreadInterruptedException(ie);
       }

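The bounded wait(250) turns the stall into a periodic re-check: if
IW.rollback aborts every running merge, mergeThreadCount() (which now skips
aborted merges) drops, and the stalled thread notices within a quarter
second instead of blocking forever. The general shape, with hypothetical
helper names:

    // Sketch: avoid waiting indefinitely on a condition another thread can
    // change without notifying this monitor. CMS itself rethrows interrupts
    // as ThreadInterruptedException; plain re-interrupt is used here.
    synchronized void stallUntilBelowLimit() {
      while (runningMergeCount() > maxMergeCount()) { // hypothetical helpers
        try {
          wait(250); // bounded: re-check the condition every 250 ms at most
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
          return;
        }
      }
    }
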
Modified: lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java?rev=1643845&r1=1643844&r2=1643845&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java (original)
+++ lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java Mon Dec  8 15:41:02 2014
@@ -83,7 +83,7 @@ final class DefaultIndexingChain extends
   }
 
   @Override
-  public void flush(SegmentWriteState state) throws IOException {
+  public void flush(SegmentWriteState state) throws IOException, AbortingException {
 
     // NOTE: caller (DocumentsWriterPerThread) handles
     // aborting on any exception from this method
@@ -176,7 +176,7 @@ final class DefaultIndexingChain extends
   /** Catch up for all docs before us that had no stored
    *  fields, or hit non-aborting exceptions before writing
    *  stored fields. */
-  private void fillStoredFields(int docID) throws IOException {
+  private void fillStoredFields(int docID) throws IOException, AbortingException {
     while (lastStoredDocID < docID) {
       startStoredFields();
       finishStoredFields();
@@ -253,36 +253,28 @@ final class DefaultIndexingChain extends
 
   /** Calls StoredFieldsWriter.startDocument, aborting the
    *  segment if it hits any exception. */
-  private void startStoredFields() throws IOException {
-    boolean success = false;
+  private void startStoredFields() throws IOException, AbortingException {
     try {
       initStoredFieldsWriter();
       storedFieldsWriter.startDocument();
-      success = true;
-    } finally {
-      if (success == false) {
-        docWriter.setAborting();        
-      }
+    } catch (Throwable th) {
+      throw new AbortingException(th);
     }
     lastStoredDocID++;
   }
 
   /** Calls StoredFieldsWriter.finishDocument, aborting the
    *  segment if it hits any exception. */
-  private void finishStoredFields() throws IOException {
-    boolean success = false;
+  private void finishStoredFields() throws IOException, AbortingException {
     try {
       storedFieldsWriter.finishDocument();
-      success = true;
-    } finally {
-      if (success == false) {
-        docWriter.setAborting();        
-      }
+    } catch (Throwable th) {
+      throw new AbortingException(th);
     }
   }
 
   @Override
-  public void processDocument() throws IOException {
+  public void processDocument() throws IOException, AbortingException {
 
     // How many indexed field names we've seen (collapses
     // multiple field instances by the same name):
@@ -325,16 +317,12 @@ final class DefaultIndexingChain extends
       }
     }
 
-    boolean success = false;
     try {
       termsHash.finishDocument();
-      success = true;
-    } finally {
-      if (success == false) {
-        // Must abort, on the possibility that on-disk term
-        // vectors are now corrupt:
-        docWriter.setAborting();
-      }
+    } catch (Throwable th) {
+      // Must abort, on the possibility that on-disk term
+      // vectors are now corrupt:
+      throw new AbortingException(th);
     }
 
     // Add stored fields:
@@ -352,9 +340,12 @@ final class DefaultIndexingChain extends
         
         PerField fp = getOrAddField(fieldName, fieldType, false);
         if (fieldType.stored()) {
-          abort = true;
-          storedFieldsWriter.writeField(fp.fieldInfo, field);
-          abort = false;
+          try {
+            storedFieldsWriter.writeField(fp.fieldInfo, field);
+          } catch (Throwable th) {
+            abort = true;
+            throw new AbortingException(th);
+          }
         }
 
         DocValuesType dvType = fieldType.docValuesType();
@@ -366,9 +357,7 @@ final class DefaultIndexingChain extends
         }
       }
     } finally {
-      if (abort) {
-        docWriter.setAborting();
-      } else {
+      if (abort == false) {
         finishStoredFields();
       }
     }
@@ -575,7 +564,7 @@ final class DefaultIndexingChain extends
     /** Inverts one field for one document; first is true
      *  if this is the first time we are seeing this field
      *  name in this document. */
-    public void invert(IndexableField field, boolean first) throws IOException {
+    public void invert(IndexableField field, boolean first) throws IOException, AbortingException {
       if (first) {
         // First time we're seeing this field (indexed) in
         // this document:
@@ -602,7 +591,6 @@ final class DefaultIndexingChain extends
        * when we fail. We expect some caller to eventually deal with the real exception, so we don't want any 'catch' clauses,
        * but rather a finally that takes note of the problem.
        */
-      boolean aborting = false;
       boolean succeededInProcessingField = false;
       try (TokenStream stream = tokenStream = field.tokenStream(docState.analyzer, tokenStream)) {
         // reset the TokenStream to the first token
@@ -654,9 +642,21 @@ final class DefaultIndexingChain extends
           // internal state of the terms hash is now
           // corrupt and should not be flushed to a
           // new segment:
-          aborting = true;
-          termsHashPerField.add();
-          aborting = false;
+          try {
+            termsHashPerField.add();
+          } catch (MaxBytesLengthExceededException e) {
+            byte[] prefix = new byte[30];
+            BytesRef bigTerm = invertState.termAttribute.getBytesRef();
+            System.arraycopy(bigTerm.bytes, bigTerm.offset, prefix, 0, 30);
+            String msg = "Document contains at least one immense term in field=\"" + fieldInfo.name + "\" (whose UTF8 encoding is longer than the max length " + DocumentsWriterPerThread.MAX_TERM_LENGTH_UTF8 + "), all of which were skipped.  Please correct the analyzer to not produce such terms.  The prefix of the first immense term is: '" + Arrays.toString(prefix) + "...', original message: " + e.getMessage();
+            if (docState.infoStream.isEnabled("IW")) {
+              docState.infoStream.message("IW", "ERROR: " + msg);
+            }
+            // Document will be deleted above:
+            throw new IllegalArgumentException(msg, e);
+          } catch (Throwable th) {
+            throw new AbortingException(th);
+          }
         }
 
         // trigger streams to perform end-of-stream operations
@@ -669,22 +669,7 @@ final class DefaultIndexingChain extends
 
         /* if there is an exception coming through, we won't set this to true here:*/
         succeededInProcessingField = true;
-      } catch (MaxBytesLengthExceededException e) {
-        aborting = false;
-        byte[] prefix = new byte[30];
-        BytesRef bigTerm = invertState.termAttribute.getBytesRef();
-        System.arraycopy(bigTerm.bytes, bigTerm.offset, prefix, 0, 30);
-        String msg = "Document contains at least one immense term in field=\"" + fieldInfo.name + "\" (whose UTF8 encoding is longer than the max length " + DocumentsWriterPerThread.MAX_TERM_LENGTH_UTF8 + "), all of which were skipped.  Please correct the analyzer to not produce such terms.  The prefix of the first immense term is: '" + Arrays.toString(prefix) + "...', original message: " + e.getMessage();
-        if (docState.infoStream.isEnabled("IW")) {
-          docState.infoStream.message("IW", "ERROR: " + msg);
-        }
-        // Document will be deleted above:
-        throw new IllegalArgumentException(msg, e);
       } finally {
-        if (succeededInProcessingField == false && aborting) {
-          docState.docWriter.setAborting();
-        }
-
         if (!succeededInProcessingField && docState.infoStream.isEnabled("DW")) {
           docState.infoStream.message("DW", "An exception was thrown while processing field " + fieldInfo.name);
         }

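The recurring shape of this refactor: success-flag finally blocks that set
docWriter.setAborting() give way to wrapping the failure itself, so "this
must abort" travels with the exception (and is later unboxed by
IndexWriter.tragicEvent) rather than living in per-thread flags. In
miniature, with op() as a stand-in for startDocument, finishDocument, etc.:

    void guardedStep() throws IOException, AbortingException {
      try {
        op(); // stand-in for a step that may corrupt in-RAM segment state
      } catch (Throwable th) {
        throw new AbortingException(th); // the exception itself says "abort"
      }
    }
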
Modified: lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/DocConsumer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/DocConsumer.java?rev=1643845&r1=1643844&r2=1643845&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/DocConsumer.java (original)
+++ lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/DocConsumer.java Mon Dec  8 15:41:02 2014
@@ -20,7 +20,7 @@ package org.apache.lucene.index;
 import java.io.IOException;
 
 abstract class DocConsumer {
-  abstract void processDocument() throws IOException;
-  abstract void flush(final SegmentWriteState state) throws IOException;
+  abstract void processDocument() throws IOException, AbortingException;
+  abstract void flush(final SegmentWriteState state) throws IOException, AbortingException;
   abstract void abort();
 }

Modified: lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java?rev=1643845&r1=1643844&r2=1643845&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java (original)
+++ lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java Mon Dec  8 15:41:02 2014
@@ -35,6 +35,7 @@ import org.apache.lucene.search.Query;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Accountable;
+import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.InfoStream;
 
 /**
@@ -203,7 +204,6 @@ final class DocumentsWriter implements C
   synchronized void abort(IndexWriter writer) {
     assert !Thread.holdsLock(writer) : "IndexWriter lock should never be hold when aborting";
     boolean success = false;
-    final Set<String> newFilesSet = new HashSet<>();
     try {
       deleteQueue.clear();
       if (infoStream.isEnabled("DW")) {
@@ -214,18 +214,17 @@ final class DocumentsWriter implements C
         final ThreadState perThread = perThreadPool.getThreadState(i);
         perThread.lock();
         try {
-          abortThreadState(perThread, newFilesSet);
+          abortThreadState(perThread);
         } finally {
           perThread.unlock();
         }
       }
-      flushControl.abortPendingFlushes(newFilesSet);
-      putEvent(new DeleteNewFilesEvent(newFilesSet));
+      flushControl.abortPendingFlushes();
       flushControl.waitForFlush();
       success = true;
     } finally {
       if (infoStream.isEnabled("DW")) {
-        infoStream.message("DW", "done abort; abortedFiles=" + newFilesSet + " success=" + success);
+        infoStream.message("DW", "done abort success=" + success);
       }
     }
   }
@@ -239,15 +238,13 @@ final class DocumentsWriter implements C
     try {
       deleteQueue.clear();
       final int limit = perThreadPool.getMaxThreadStates();
-      final Set<String> newFilesSet = new HashSet<>();
       for (int i = 0; i < limit; i++) {
         final ThreadState perThread = perThreadPool.getThreadState(i);
         perThread.lock();
-        abortThreadState(perThread, newFilesSet);
+        abortThreadState(perThread);
       }
       deleteQueue.clear();
-      flushControl.abortPendingFlushes(newFilesSet);
-      putEvent(new DeleteNewFilesEvent(newFilesSet));
+      flushControl.abortPendingFlushes();
       flushControl.waitForFlush();
       success = true;
     } finally {
@@ -261,15 +258,14 @@ final class DocumentsWriter implements C
     }
   }
 
-  private final void abortThreadState(final ThreadState perThread, Set<String> newFiles) {
+  private final void abortThreadState(final ThreadState perThread) {
     assert perThread.isHeldByCurrentThread();
     if (perThread.isActive()) { // we might be closed
       if (perThread.isInitialized()) { 
         try {
           subtractFlushedNumDocs(perThread.dwpt.getNumDocsInRAM());
-          perThread.dwpt.abort(newFiles);
+          perThread.dwpt.abort();
         } finally {
-          perThread.dwpt.checkAndResetHasAborted();
           flushControl.doOnAbort(perThread);
         }
       } else {
@@ -340,7 +336,7 @@ final class DocumentsWriter implements C
     flushControl.setClosed();
   }
 
-  private boolean preUpdate() throws IOException {
+  private boolean preUpdate() throws IOException, AbortingException {
     ensureOpen();
     boolean hasEvents = false;
     if (flushControl.anyStalledThreads() || flushControl.numQueuedFlushes() > 0) {
@@ -372,7 +368,7 @@ final class DocumentsWriter implements C
     return hasEvents;
   }
 
-  private boolean postUpdate(DocumentsWriterPerThread flushingDWPT, boolean hasEvents) throws IOException {
+  private boolean postUpdate(DocumentsWriterPerThread flushingDWPT, boolean hasEvents) throws IOException, AbortingException {
     hasEvents |= applyAllDeletes(deleteQueue);
     if (flushingDWPT != null) {
       hasEvents |= doFlush(flushingDWPT);
@@ -397,7 +393,7 @@ final class DocumentsWriter implements C
   }
 
   boolean updateDocuments(final Iterable<? extends IndexDocument> docs, final Analyzer analyzer,
-                          final Term delTerm) throws IOException {
+                          final Term delTerm) throws IOException, AbortingException {
     boolean hasEvents = preUpdate();
 
     final ThreadState perThread = flushControl.obtainAndLock();
@@ -414,18 +410,15 @@ final class DocumentsWriter implements C
       final int dwptNumDocs = dwpt.getNumDocsInRAM();
       try {
         dwpt.updateDocuments(docs, analyzer, delTerm);
+      } catch (AbortingException ae) {
+        flushControl.doOnAbort(perThread);
+        dwpt.abort();
+        throw ae;
       } finally {
         // We don't know how many documents were actually
         // counted as indexed, so we must subtract here to
         // accumulate our separate counter:
         numDocsInRAM.addAndGet(dwpt.getNumDocsInRAM() - dwptNumDocs);
-        if (dwpt.checkAndResetHasAborted()) {
-          if (!dwpt.pendingFilesToDelete().isEmpty()) {
-            putEvent(new DeleteNewFilesEvent(dwpt.pendingFilesToDelete()));
-          }
-          subtractFlushedNumDocs(dwptNumDocs);
-          flushControl.doOnAbort(perThread);
-        }
       }
       final boolean isUpdate = delTerm != null;
       flushingDWPT = flushControl.doAfterDocument(perThread, isUpdate);
@@ -437,7 +430,7 @@ final class DocumentsWriter implements C
   }
 
   boolean updateDocument(final IndexDocument doc, final Analyzer analyzer,
-      final Term delTerm) throws IOException {
+      final Term delTerm) throws IOException, AbortingException {
 
     boolean hasEvents = preUpdate();
 
@@ -455,18 +448,15 @@ final class DocumentsWriter implements C
       final int dwptNumDocs = dwpt.getNumDocsInRAM();
       try {
         dwpt.updateDocument(doc, analyzer, delTerm); 
+      } catch (AbortingException ae) {
+        flushControl.doOnAbort(perThread);
+        dwpt.abort();
+        throw ae;
       } finally {
         // We don't know whether the document actually
         // counted as being indexed, so we must subtract here to
         // accumulate our separate counter:
         numDocsInRAM.addAndGet(dwpt.getNumDocsInRAM() - dwptNumDocs);
-        if (dwpt.checkAndResetHasAborted()) {
-          if (!dwpt.pendingFilesToDelete().isEmpty()) {
-            putEvent(new DeleteNewFilesEvent(dwpt.pendingFilesToDelete()));
-          }
-          subtractFlushedNumDocs(dwptNumDocs);
-          flushControl.doOnAbort(perThread);
-        }
       }
       final boolean isUpdate = delTerm != null;
       flushingDWPT = flushControl.doAfterDocument(perThread, isUpdate);
@@ -477,12 +467,13 @@ final class DocumentsWriter implements C
     return postUpdate(flushingDWPT, hasEvents);
   }
 
-  private  boolean doFlush(DocumentsWriterPerThread flushingDWPT) throws IOException {
+  private boolean doFlush(DocumentsWriterPerThread flushingDWPT) throws IOException, AbortingException {
     boolean hasEvents = false;
     while (flushingDWPT != null) {
       hasEvents = true;
       boolean success = false;
       SegmentFlushTicket ticket = null;
+      Throwable exc = null;
       try {
         assert currentFullFlushDelQueue == null
             || flushingDWPT.deleteQueue == currentFullFlushDelQueue : "expected: "
@@ -548,7 +539,6 @@ final class DocumentsWriter implements C
         }
       } finally {
         flushControl.doAfterFlush(flushingDWPT);
-        flushingDWPT.checkAndResetHasAborted();
       }
      
       flushingDWPT = flushControl.nextPendingFlush();
@@ -597,8 +587,8 @@ final class DocumentsWriter implements C
    * two stage operation; the caller must ensure (in try/finally) that finishFlush
    * is called after this method, to release the flush lock in DWFlushControl
    */
-  final boolean flushAllThreads(final IndexWriter indexWriter)
-    throws IOException {
+  final boolean flushAllThreads()
+    throws IOException, AbortingException {
     final DocumentsWriterDeleteQueue flushingDeleteQueue;
     if (infoStream.isEnabled("DW")) {
       infoStream.message("DW", "startFullFlush");
@@ -631,7 +621,7 @@ final class DocumentsWriter implements C
         }
         ticketQueue.addDeletes(flushingDeleteQueue);
       } 
-      ticketQueue.forcePurge(indexWriter);
+      ticketQueue.forcePurge(writer);
       assert !flushingDeleteQueue.anyChanges() && !ticketQueue.hasTickets();
     } finally {
       assert flushingDeleteQueue == currentFullFlushDelQueue;
@@ -649,9 +639,7 @@ final class DocumentsWriter implements C
         // Release the flush lock
         flushControl.finishFullFlush();
       } else {
-        Set<String> newFilesSet = new HashSet<>();
-        flushControl.abortFullFlushes(newFilesSet);
-        putEvent(new DeleteNewFilesEvent(newFilesSet));
+        flushControl.abortFullFlushes();
 
       }
     } finally {

Modified: lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterFlushControl.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterFlushControl.java?rev=1643845&r1=1643844&r2=1643845&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterFlushControl.java (original)
+++ lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterFlushControl.java Mon Dec  8 15:41:02 2014
@@ -310,7 +310,7 @@ final class DocumentsWriterFlushControl
       dwpt = perThreadPool.reset(perThread, closed);
       numPending--;
       blockedFlushes.add(new BlockedFlush(dwpt, bytes));
-    }finally {
+    } finally {
       perThread.unlock();
     }
   }
@@ -611,20 +611,20 @@ final class DocumentsWriterFlushControl
     return true;
   }
 
-  synchronized void abortFullFlushes(Set<String> newFiles) {
+  synchronized void abortFullFlushes() {
    try {
-     abortPendingFlushes(newFiles);
+     abortPendingFlushes();
    } finally {
      fullFlush = false;
    }
   }
   
-  synchronized void abortPendingFlushes(Set<String> newFiles) {
+  synchronized void abortPendingFlushes() {
     try {
       for (DocumentsWriterPerThread dwpt : flushQueue) {
         try {
           documentsWriter.subtractFlushedNumDocs(dwpt.getNumDocsInRAM());
-          dwpt.abort(newFiles);
+          dwpt.abort();
         } catch (Throwable ex) {
           // ignore - keep on aborting the flush queue
         } finally {
@@ -636,7 +636,7 @@ final class DocumentsWriterFlushControl
           flushingWriters
               .put(blockedFlush.dwpt, Long.valueOf(blockedFlush.bytes));
           documentsWriter.subtractFlushedNumDocs(blockedFlush.dwpt.getNumDocsInRAM());
-          blockedFlush.dwpt.abort(newFiles);
+          blockedFlush.dwpt.abort();
         } catch (Throwable ex) {
           // ignore - keep on aborting the blocked queue
         } finally {

Modified: lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java?rev=1643845&r1=1643844&r2=1643845&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java (original)
+++ lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java Mon Dec  8 15:41:02 2014
@@ -112,9 +112,9 @@ class DocumentsWriterPerThread {
    *  updating the index files) and must discard all
    *  currently buffered docs.  This resets our state,
    *  discarding any docs added since last flush. */
-  void abort(Set<String> createdFiles) {
+  void abort() {
     //System.out.println(Thread.currentThread().getName() + ": now abort seg=" + segmentInfo.name);
-    hasAborted = aborting = true;
+    aborted = true;
     try {
       if (infoStream.isEnabled("DWPT")) {
         infoStream.message("DWPT", "now abort");
@@ -125,9 +125,7 @@ class DocumentsWriterPerThread {
       }
 
       pendingUpdates.clear();
-      createdFiles.addAll(directory.getCreatedFiles());
     } finally {
-      aborting = false;
       if (infoStream.isEnabled("DWPT")) {
         infoStream.message("DWPT", "done abort");
       }
@@ -145,8 +143,7 @@ class DocumentsWriterPerThread {
   // Updates for our still-in-RAM (to be flushed next) segment
   final BufferedUpdates pendingUpdates;
   private final SegmentInfo segmentInfo;     // Current segment we are working on
-  boolean aborting = false;   // True if an abort is pending
-  boolean hasAborted = false; // True if the last exception throws by #updateDocument was aborting
+  boolean aborted = false;   // True if we aborted
 
   private final FieldInfos.Builder fieldInfos;
   private final InfoStream infoStream;
@@ -189,20 +186,10 @@ class DocumentsWriterPerThread {
     consumer = indexWriterConfig.getIndexingChain().getChain(this);
   }
   
-  void setAborting() {
-    aborting = true;
-  }
-
   public FieldInfos.Builder getFieldInfosBuilder() {
     return fieldInfos;
   }
 
-  boolean checkAndResetHasAborted() {
-    final boolean retval = hasAborted;
-    hasAborted = false;
-    return retval;
-  }
-  
   final void testPoint(String message) {
     if (infoStream.isEnabled("TP")) {
       infoStream.message("TP", message);
@@ -219,7 +206,7 @@ class DocumentsWriterPerThread {
     }
   }
 
-  public void updateDocument(IndexDocument doc, Analyzer analyzer, Term delTerm) throws IOException {
+  public void updateDocument(IndexDocument doc, Analyzer analyzer, Term delTerm) throws IOException, AbortingException {
     testPoint("DocumentsWriterPerThread addDocument start");
     assert deleteQueue != null;
     docState.doc = doc;
@@ -245,19 +232,15 @@ class DocumentsWriterPerThread {
       success = true;
     } finally {
       if (!success) {
-        if (!aborting) {
-          // mark document as deleted
-          deleteDocID(docState.docID);
-          numDocsInRAM++;
-        } else {
-          abort(filesToDelete);
-        }
+        // mark document as deleted
+        deleteDocID(docState.docID);
+        numDocsInRAM++;
       }
     }
     finishDocument(delTerm);
   }
 
-  public int updateDocuments(Iterable<? extends IndexDocument> docs, Analyzer analyzer, Term delTerm) throws IOException {
+  public int updateDocuments(Iterable<? extends IndexDocument> docs, Analyzer analyzer, Term delTerm) throws IOException, AbortingException {
     testPoint("DocumentsWriterPerThread addDocuments start");
     assert deleteQueue != null;
     docState.analyzer = analyzer;
@@ -286,14 +269,9 @@ class DocumentsWriterPerThread {
           success = true;
         } finally {
           if (!success) {
-            // An exc is being thrown...
-            if (!aborting) {
-              // Incr here because finishDocument will not
-              // be called (because an exc is being thrown):
-              numDocsInRAM++;
-            } else {
-              abort(filesToDelete);
-            }
+            // Incr here because finishDocument will not
+            // be called (because an exc is being thrown):
+            numDocsInRAM++;
           }
         }
         finishDocument(null);
@@ -310,7 +288,7 @@ class DocumentsWriterPerThread {
       }
 
     } finally {
-      if (!allDocsIndexed && !aborting) {
+      if (!allDocsIndexed && !aborted) {
         // the iterator threw an exception that is not aborting 
         // go and mark all docs from this block as deleted
         int docID = numDocsInRAM-1;
@@ -403,7 +381,7 @@ class DocumentsWriterPerThread {
   }
 
   /** Flush all pending docs to a new segment */
-  FlushedSegment flush() throws IOException {
+  FlushedSegment flush() throws IOException, AbortingException {
     assert numDocsInRAM > 0;
     assert deleteSlice.isEmpty() : "all deletes must be applied in prepareFlush";
     segmentInfo.setDocCount(numDocsInRAM);
@@ -424,7 +402,7 @@ class DocumentsWriterPerThread {
       pendingUpdates.docIDs.clear();
     }
 
-    if (aborting) {
+    if (aborted) {
       if (infoStream.isEnabled("DWPT")) {
         infoStream.message("DWPT", "flush: skip because aborting is set");
       }
@@ -435,8 +413,6 @@ class DocumentsWriterPerThread {
       infoStream.message("DWPT", "flush postings as segment " + flushState.segmentInfo.name + " numDocs=" + numDocsInRAM);
     }
 
-    boolean success = false;
-
     try {
       consumer.flush(flushState);
       pendingUpdates.terms.clear();
@@ -476,13 +452,11 @@ class DocumentsWriterPerThread {
       FlushedSegment fs = new FlushedSegment(segmentInfoPerCommit, flushState.fieldInfos,
                                              segmentDeletes, flushState.liveDocs, flushState.delCountOnFlush);
       sealFlushedSegment(fs);
-      success = true;
 
       return fs;
-    } finally {
-      if (!success) {
-        abort(filesToDelete);
-      }
+    } catch (Throwable th) {
+      abort();
+      throw new AbortingException(th);
     }
   }
   
@@ -603,7 +577,7 @@ class DocumentsWriterPerThread {
   @Override
   public String toString() {
     return "DocumentsWriterPerThread [pendingDeletes=" + pendingUpdates
-      + ", segment=" + (segmentInfo != null ? segmentInfo.name : "null") + ", aborting=" + aborting + ", numDocsInRAM="
+      + ", segment=" + (segmentInfo != null ? segmentInfo.name : "null") + ", aborted=" + aborted + ", numDocsInRAM="
         + numDocsInRAM + ", deleteQueue=" + deleteQueue + "]";
   }
   

Modified: lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java?rev=1643845&r1=1643844&r2=1643845&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java (original)
+++ lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java Mon Dec  8 15:41:02 2014
@@ -343,7 +343,7 @@ final class IndexFileDeleter implements
     }
   }
 
-  private void ensureOpen() throws AlreadyClosedException {
+  void ensureOpen() throws AlreadyClosedException {
     writer.ensureOpen(false);
     // since we allow 'closing' state, we must still check this, we could be closing because we hit e.g. OOM
     if (writer.tragedy != null) {
@@ -351,6 +351,16 @@ final class IndexFileDeleter implements
     }
   }
 
+  // for testing
+  boolean isClosed() {
+    try {
+      ensureOpen();
+      return false;
+    } catch (AlreadyClosedException ace) {
+      return true;
+    }
+  }
+
   public SegmentInfos getLastSegmentInfos() {
     return lastSegmentInfos;
   }

Modified: lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java?rev=1643845&r1=1643844&r2=1643845&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java (original)
+++ lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java Mon Dec  8 15:41:02 2014
@@ -405,7 +405,7 @@ public class IndexWriter implements Clos
       synchronized (fullFlushLock) {
         boolean success = false;
         try {
-          anySegmentFlushed = docWriter.flushAllThreads(this);
+          anySegmentFlushed = docWriter.flushAllThreads();
           if (!anySegmentFlushed) {
             // prevent double increment since docWriter#doFlush increments the flushcount
             // if we flushed anything.
@@ -422,8 +422,8 @@ public class IndexWriter implements Clos
               infoStream.message("IW", "return reader version=" + r.getVersion() + " reader=" + r);
             }
           }
-        } catch (OutOfMemoryError oom) {
-          tragicEvent(oom, "getReader");
+        } catch (AbortingException | OutOfMemoryError tragedy) {
+          tragicEvent(tragedy, "getReader");
           // never reached but javac disagrees:
           return null;
         } finally {
@@ -1231,8 +1231,8 @@ public class IndexWriter implements Clos
           }
         }
       }
-    } catch (OutOfMemoryError oom) {
-      tragicEvent(oom, "updateDocuments");
+    } catch (AbortingException | OutOfMemoryError tragedy) {
+      tragicEvent(tragedy, "updateDocuments");
     }
   }
 
@@ -1401,8 +1401,8 @@ public class IndexWriter implements Clos
           }
         }
       }
-    } catch (OutOfMemoryError oom) {
-      tragicEvent(oom, "updateDocument");
+    } catch (AbortingException | OutOfMemoryError tragedy) {
+      tragicEvent(tragedy, "updateDocument");
     }
   }
 
@@ -2723,7 +2723,7 @@ public class IndexWriter implements Clos
           boolean flushSuccess = false;
           boolean success = false;
           try {
-            anySegmentsFlushed = docWriter.flushAllThreads(this);
+            anySegmentsFlushed = docWriter.flushAllThreads();
             if (!anySegmentsFlushed) {
               // prevent double increment since docWriter#doFlush increments the flushcount
               // if we flushed anything.
@@ -2766,8 +2766,8 @@ public class IndexWriter implements Clos
             doAfterFlush();
           }
         }
-      } catch (OutOfMemoryError oom) {
-        tragicEvent(oom, "prepareCommit");
+      } catch (AbortingException | OutOfMemoryError tragedy) {
+        tragicEvent(tragedy, "prepareCommit");
       }
      
       boolean success = false;
@@ -3008,7 +3008,7 @@ public class IndexWriter implements Clos
       synchronized (fullFlushLock) {
       boolean flushSuccess = false;
         try {
-          anySegmentFlushed = docWriter.flushAllThreads(this);
+          anySegmentFlushed = docWriter.flushAllThreads();
           flushSuccess = true;
         } finally {
           docWriter.finishFullFlush(flushSuccess);
@@ -3025,8 +3025,8 @@ public class IndexWriter implements Clos
         success = true;
         return anySegmentFlushed;
       }
-    } catch (OutOfMemoryError oom) {
-      tragicEvent(oom, "doFlush");
+    } catch (AbortingException | OutOfMemoryError tragedy) {
+      tragicEvent(tragedy, "doFlush");
       // never hit
       return false;
     } finally {
@@ -4253,7 +4253,9 @@ public class IndexWriter implements Clos
 
       synchronized(this) {
 
-        assert lastCommitChangeCount <= changeCount: "lastCommitChangeCount=" + lastCommitChangeCount + " changeCount=" + changeCount;
+        if (lastCommitChangeCount > changeCount) {
+          throw new IllegalStateException("lastCommitChangeCount=" + lastCommitChangeCount + ", changeCount=" + changeCount);
+        }
 
         if (pendingCommitChangeCount == lastCommitChangeCount) {
           if (infoStream.isEnabled("IW")) {
@@ -4381,19 +4383,28 @@ public class IndexWriter implements Clos
     public abstract void warm(LeafReader reader) throws IOException;
   }
 
-  private void tragicEvent(Throwable tragedy, String location) {
+  void tragicEvent(Throwable tragedy, String location) throws IOException {
+    // unbox our internal AbortingException
+    if (tragedy instanceof AbortingException) {
+      tragedy = tragedy.getCause();
+    }
     // We cannot hold IW's lock here, else it can lead to deadlock:
     assert Thread.holdsLock(this) == false;
 
+    // How can it be a tragedy when nothing happened?
+    assert tragedy != null;
+
     if (infoStream.isEnabled("IW")) {
-      infoStream.message("IW", "hit " + tragedy.getClass().getSimpleName() + " inside " + location);
+      infoStream.message("IW", "hit tragic " + tragedy.getClass().getSimpleName() + " inside " + location);
     }
+
     synchronized (this) {
-      // its possible you could have a really bad day
+      // it's possible you could have a really bad day
       if (this.tragedy == null) {
         this.tragedy = tragedy;
       }
     }
+
     // if we are already closed (e.g. called by rollback), this will be a no-op.
     synchronized(commitLock) {
       if (closing == false) {
@@ -4405,7 +4416,8 @@ public class IndexWriter implements Clos
         }
       }
     }
-    IOUtils.reThrowUnchecked(tragedy);
+
+    IOUtils.reThrow(tragedy);
   }
 
   // Used for testing.  Current points:
@@ -4571,11 +4583,13 @@ public class IndexWriter implements Clos
   }
   
   private boolean processEvents(Queue<Event> queue, boolean triggerMerge, boolean forcePurge) throws IOException {
-    Event event;
     boolean processed = false;
-    while((event = queue.poll()) != null)  {
-      processed = true;
-      event.process(this, triggerMerge, forcePurge);
+    if (tragedy == null) {
+      Event event;
+      while((event = queue.poll()) != null)  {
+        processed = true;
+        event.process(this, triggerMerge, forcePurge);
+      }
     }
     return processed;
   }

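The IndexWriter hunks above all follow one rule: every entry point catches the new AbortingException alongside OutOfMemoryError and funnels both into tragicEvent, which unboxes the wrapped cause, keeps only the first tragedy, and then refuses to process queued events. A condensed, self-contained sketch of that bookkeeping (stand-in types, not the real IndexWriter):

    class TragedySketch {
      // Stands in for AbortingException: an internal box around the real cause.
      static class Wrapper extends RuntimeException {
        Wrapper(Throwable cause) { super(cause); }
      }

      private Throwable tragedy;

      synchronized void record(Throwable t) {
        if (t instanceof Wrapper) {
          t = t.getCause();      // unbox so callers see the real cause
        }
        if (tragedy == null) {   // only the first tragedy is kept
          tragedy = t;
        }
      }

      // Mirrors processEvents(): once a tragedy is recorded, skip further events.
      synchronized boolean process(Runnable event) {
        if (tragedy != null) {
          return false;
        }
        event.run();
        return true;
      }
    }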
Modified: lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java?rev=1643845&r1=1643844&r2=1643845&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java (original)
+++ lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java Mon Dec  8 15:41:02 2014
@@ -142,6 +142,9 @@ public final class IndexWriterConfig ext
    *           if this config is already attached to a writer.
    */
   IndexWriterConfig setIndexWriter(IndexWriter writer) {
+    if (this.writer.get() != null) {
+      throw new IllegalStateException("do not share IndexWriterConfig instances across IndexWriters");
+    }
     this.writer.set(writer);
     return this;
   }

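With this guard, attaching one IndexWriterConfig to a second IndexWriter fails fast with IllegalStateException, which is exactly what the TestIndexWriterConfig hunk below now expects. A hedged sketch of the same set-once idea as a generic holder; the compareAndSet variant is slightly stronger than the get-then-set above because it also rejects concurrent double-sets:

    import java.util.concurrent.atomic.AtomicReference;

    class SetOnceSketch<T> {
      private final AtomicReference<T> ref = new AtomicReference<>();

      void set(T value) {
        if (!ref.compareAndSet(null, value)) {
          throw new IllegalStateException("do not share this instance across owners");
        }
      }

      T get() {
        return ref.get();
      }
    }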
Modified: lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/MergePolicy.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/MergePolicy.java?rev=1643845&r1=1643844&r2=1643845&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/MergePolicy.java (original)
+++ lucene/dev/branches/lucene2878/lucene/core/src/java/org/apache/lucene/index/MergePolicy.java Mon Dec  8 15:41:02 2014
@@ -209,8 +209,8 @@ public abstract class MergePolicy {
       while (paused) {
         try {
           // In theory we could wait() indefinitely, but we
-          // do 1000 msec, defensively
-          wait(1000);
+          // do 250 msec, defensively
+          wait(250);
         } catch (InterruptedException ie) {
           throw new RuntimeException(ie);
         }

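Shrinking the bound from 1000 to 250 msec is safe because the waiter always re-checks its condition after waking; the timeout only caps how long a lost notify can stall it. The canonical guarded-wait loop, sketched:

    class PauseSketch {
      private boolean paused;

      synchronized void waitWhilePaused() {
        while (paused) {       // always re-check the condition after waking
          try {
            wait(250);         // bounded, defensively: a lost notify costs at most 250 msec
          } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
            throw new RuntimeException(ie);
          }
        }
      }

      synchronized void setPaused(boolean paused) {
        this.paused = paused;
        notifyAll();           // wake waiters so they re-check promptly
      }
    }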
Modified: lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/codecs/compressing/TestCompressingStoredFieldsFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/codecs/compressing/TestCompressingStoredFieldsFormat.java?rev=1643845&r1=1643844&r2=1643845&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/codecs/compressing/TestCompressingStoredFieldsFormat.java (original)
+++ lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/codecs/compressing/TestCompressingStoredFieldsFormat.java Mon Dec  8 15:41:02 2014
@@ -22,17 +22,17 @@ import java.io.IOException;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.IntField;
 import org.apache.lucene.index.BaseStoredFieldsFormatTestCase;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.junit.Test;
-
 import com.carrotsearch.randomizedtesting.generators.RandomInts;
 
 public class TestCompressingStoredFieldsFormat extends BaseStoredFieldsFormatTestCase {
@@ -84,17 +84,7 @@ public class TestCompressingStoredFields
       iw.commit();
     }
     finally {
-      // next event will cause IW to delete the old files: we use prepareCommit just as example
-      iw.prepareCommit();
-      int counter = 0;
-      for (String fileName : dir.listAll()) {
-        if (fileName.endsWith(".fdt") || fileName.endsWith(".fdx")) {
-          counter++;
-        }
-      }
-      // Only one .fdt and one .fdx files must have been found
-      assertEquals(2, counter);
-      iw.rollback();
+      // Abort should have closed the deleter:
       dir.close();
     }
   }

Modified: lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java?rev=1643845&r1=1643844&r2=1643845&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java (original)
+++ lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java Mon Dec  8 15:41:02 2014
@@ -29,6 +29,7 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.LuceneTestCase;
@@ -81,14 +82,23 @@ public class TestConcurrentMergeSchedule
     MockDirectoryWrapper directory = newMockDirectory();
     FailOnlyOnFlush failure = new FailOnlyOnFlush();
     directory.failOn(failure);
-
-    IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(new MockAnalyzer(random()))
-                                                      .setMaxBufferedDocs(2));
+    IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()))
+      .setMaxBufferedDocs(2);
+    if (iwc.getMergeScheduler() instanceof ConcurrentMergeScheduler) {
+      iwc.setMergeScheduler(new SuppressingConcurrentMergeScheduler() {
+          @Override
+          protected boolean isOK(Throwable th) {
+            return th instanceof AlreadyClosedException ||
+              (th instanceof IllegalStateException && th.getMessage().contains("this writer hit an unrecoverable error"));
+          }
+        });
+    }
+    IndexWriter writer = new IndexWriter(directory, iwc);
     Document doc = new Document();
     Field idField = newStringField("id", "", Field.Store.YES);
     doc.add(idField);
-    int extraCount = 0;
 
+    outer:
     for(int i=0;i<10;i++) {
       if (VERBOSE) {
         System.out.println("TEST: iter=" + i);
@@ -110,22 +120,20 @@ public class TestConcurrentMergeSchedule
           if (failure.hitExc) {
             fail("failed to hit IOException");
           }
-          extraCount++;
         } catch (IOException ioe) {
           if (VERBOSE) {
             ioe.printStackTrace(System.out);
           }
           failure.clearDoFail();
-          break;
+          assertTrue(writer.isClosed());
+          // Abort should have closed the deleter:
+          assertTrue(writer.deleter.isClosed());
+          break outer;
         }
       }
-      assertEquals(20*(i+1)+extraCount, writer.numDocs());
     }
 
-    writer.close();
-    IndexReader reader = DirectoryReader.open(directory);
-    assertEquals(200+extraCount, reader.numDocs());
-    reader.close();
+    assertFalse(DirectoryReader.indexExists(directory));
     directory.close();
   }
 
@@ -474,4 +482,72 @@ public class TestConcurrentMergeSchedule
     w.close();
     dir.close();
   }
+
+  // LUCENE-6094
+  public void testHangDuringRollback() throws Throwable {
+    Directory dir = newMockDirectory();
+    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    iwc.setMaxBufferedDocs(2);
+    LogDocMergePolicy mp = new LogDocMergePolicy();
+    iwc.setMergePolicy(mp);
+    mp.setMergeFactor(2);
+    final CountDownLatch mergeStart = new CountDownLatch(1);
+    final CountDownLatch mergeFinish = new CountDownLatch(1);
+    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler() {
+        @Override
+        protected void doMerge(MergePolicy.OneMerge merge) throws IOException {
+          mergeStart.countDown();
+          try {
+            mergeFinish.await();
+          } catch (InterruptedException ie) {
+            throw new RuntimeException(ie);
+          }
+          super.doMerge(merge);
+        }
+      };
+    cms.setMaxMergesAndThreads(1, 1);
+    iwc.setMergeScheduler(cms);
+
+    final IndexWriter w = new IndexWriter(dir, iwc);
+    
+    w.addDocument(new Document());
+    w.addDocument(new Document());
+    // flush
+
+    w.addDocument(new Document());
+    w.addDocument(new Document());
+    // flush + merge
+
+    // Wait for merge to kick off
+    mergeStart.await();
+
+    new Thread() {
+      @Override
+      public void run() {
+        try {
+          w.addDocument(new Document());
+          w.addDocument(new Document());
+          // flush
+
+          w.addDocument(new Document());
+          // Without the fix for LUCENE-6094 we would hang forever here:
+          w.addDocument(new Document());
+          // flush + merge
+          
+          // Now allow first merge to finish:
+          mergeFinish.countDown();
+
+        } catch (Exception e) {
+          throw new RuntimeException(e);
+        }
+      }
+    }.start();
+
+    while (w.numDocs() != 8) {
+      Thread.sleep(10);
+    }
+
+    w.rollback();
+    dir.close();
+  }
 }

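testHangDuringRollback drives the race deterministically with two CountDownLatches: one proves the merge has started, the other holds it open while the main thread provokes the hang. The same handshake in isolation, as a runnable sketch:

    import java.util.concurrent.CountDownLatch;

    public class LatchHandshakeSketch {
      public static void main(String[] args) throws InterruptedException {
        final CountDownLatch started = new CountDownLatch(1);
        final CountDownLatch mayFinish = new CountDownLatch(1);

        Thread worker = new Thread(() -> {
          started.countDown();     // signal: the slow operation has begun
          try {
            mayFinish.await();     // stall until the main thread says go
          } catch (InterruptedException ie) {
            throw new RuntimeException(ie);
          }
        });
        worker.start();

        started.await();           // the worker is now provably inside the operation
        // ... exercise the code under test while the operation is stalled ...
        mayFinish.countDown();     // release the worker
        worker.join();
      }
    }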
Modified: lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java?rev=1643845&r1=1643844&r2=1643845&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java (original)
+++ lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java Mon Dec  8 15:41:02 2014
@@ -906,6 +906,15 @@ public class TestIndexWriter extends Luc
       // LUCENE-2239: won't work with NIOFS/MMAP
       adder = new MockDirectoryWrapper(random, new RAMDirectory());
       IndexWriterConfig conf = newIndexWriterConfig(random, new MockAnalyzer(random));
+      if (conf.getMergeScheduler() instanceof ConcurrentMergeScheduler) {
+        conf.setMergeScheduler(new SuppressingConcurrentMergeScheduler() {
+            @Override
+            protected boolean isOK(Throwable th) {
+              return th instanceof AlreadyClosedException ||
+                (th instanceof IllegalStateException && th.getMessage().contains("this writer hit an unrecoverable error"));
+            }
+          });
+      }
       IndexWriter w = new IndexWriter(adder, conf);
       Document doc = new Document();
       doc.add(newStringField(random, "id", "500", Field.Store.NO));
@@ -969,6 +978,15 @@ public class TestIndexWriter extends Luc
             }
             IndexWriterConfig conf = newIndexWriterConfig(random,
                                                           new MockAnalyzer(random)).setMaxBufferedDocs(2);
+            if (conf.getMergeScheduler() instanceof ConcurrentMergeScheduler) {
+              conf.setMergeScheduler(new SuppressingConcurrentMergeScheduler() {
+                  @Override
+                  protected boolean isOK(Throwable th) {
+                    return th instanceof AlreadyClosedException ||
+                      (th instanceof IllegalStateException && th.getMessage().contains("this writer hit an unrecoverable error"));
+                  }
+                });
+            }
             //conf.setInfoStream(log);
             w = new IndexWriter(dir, conf);
 

Modified: lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java?rev=1643845&r1=1643844&r2=1643845&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java (original)
+++ lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java Mon Dec  8 15:41:02 2014
@@ -147,7 +147,7 @@ public class TestIndexWriterConfig exten
     try {
       assertNotNull(new RandomIndexWriter(random(), dir, conf));
       fail("should have hit AlreadySetException");
-    } catch (AlreadySetException e) {
+    } catch (IllegalStateException ise) {
       // expected
     }
 

Modified: lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java?rev=1643845&r1=1643844&r2=1643845&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java (original)
+++ lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java Mon Dec  8 15:41:02 2014
@@ -632,11 +632,9 @@ public class TestIndexWriterDelete exten
           modifier.rollback();
         }
 
-        // If the close() succeeded, make sure there are
-        // no unreferenced files.
+        // If the close() succeeded, make sure index is OK:
         if (success) {
           TestUtil.checkIndex(dir);
-          TestIndexWriter.assertNoUnreferencedFiles(dir, "after writer.close");
         }
         dir.setRandomIOExceptionRate(randomIOExceptionRate);
         dir.setMaxSizeInBytes(maxSizeInBytes);
@@ -922,8 +920,8 @@ public class TestIndexWriterDelete exten
         break;
       }
     }
+    assertTrue(modifier.deleter.isClosed());
 
-    modifier.close();
     TestIndexWriter.assertNoUnreferencedFiles(dir, "docsWriter.abort() failed to delete unreferenced files");
     dir.close();
   }

Modified: lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java?rev=1643845&r1=1643844&r2=1643845&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java (original)
+++ lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java Mon Dec  8 15:41:02 2014
@@ -575,8 +575,7 @@ public class TestIndexWriterExceptions e
     }
   }
 
-  // LUCENE-1072: make sure an errant exception on flushing
-  // one segment only takes out those docs in that one flush
+  // make sure an aborting exception closes the writer:
   public void testDocumentsWriterAbort() throws IOException {
     MockDirectoryWrapper dir = newMockDirectory();
     FailOnlyOnFlush failure = new FailOnlyOnFlush();
@@ -589,20 +588,18 @@ public class TestIndexWriterExceptions e
     String contents = "aa bb cc dd ee ff gg hh ii jj kk";
     doc.add(newTextField("content", contents, Field.Store.NO));
     boolean hitError = false;
-    for(int i=0;i<200;i++) {
-      try {
-        writer.addDocument(doc);
-      } catch (IOException ioe) {
-        // only one flush should fail:
-        assertFalse(hitError);
-        hitError = true;
-      }
+    writer.addDocument(doc);
+    try {
+      writer.addDocument(doc);
+      fail("did not hit exception");
+    } catch (IOException ioe) {
+      // only one flush should fail:
+      assertFalse(hitError);
+      hitError = true;
+      assertTrue(writer.deleter.isClosed());
+      assertTrue(writer.isClosed());
     }
-    assertTrue(hitError);
-    writer.close();
-    IndexReader reader = DirectoryReader.open(dir);
-    assertEquals(198, reader.docFreq(new Term("content", "aa")));
-    reader.close();
+    assertFalse(DirectoryReader.indexExists(dir));
     dir.close();
   }
 
@@ -1265,6 +1262,7 @@ public class TestIndexWriterExceptions e
         new FailOnTermVectors(FailOnTermVectors.AFTER_INIT_STAGE),
         new FailOnTermVectors(FailOnTermVectors.INIT_STAGE), };
     int num = atLeast(1);
+    iters:
     for (int j = 0; j < num; j++) {
       for (FailOnTermVectors failure : failures) {
         MockDirectoryWrapper dir = newMockDirectory();
@@ -1273,14 +1271,19 @@ public class TestIndexWriterExceptions e
         int numDocs = 10 + random().nextInt(30);
         for (int i = 0; i < numDocs; i++) {
           Document doc = new Document();
+          // random TV
           Field field = newTextField(random(), "field", "a field", Field.Store.YES);
           doc.add(field);
-          // random TV
           try {
             w.addDocument(doc);
             assertFalse(field.fieldType().storeTermVectors());
           } catch (RuntimeException e) {
             assertTrue(e.getMessage().startsWith(FailOnTermVectors.EXC_MSG));
+            // This is an aborting exception, so writer is closed:
+            assertTrue(w.deleter.isClosed());
+            assertTrue(w.isClosed());
+            dir.close();
+            continue iters;
           }
           if (random().nextInt(20) == 0) {
             w.commit();

Modified: lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions2.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions2.java?rev=1643845&r1=1643844&r2=1643845&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions2.java (original)
+++ lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions2.java Mon Dec  8 15:41:02 2014
@@ -40,14 +40,15 @@ import org.apache.lucene.document.Sorted
 import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.document.StoredField;
 import org.apache.lucene.document.TextField;
+import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.Rethrow;
-import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+import org.apache.lucene.util.TestUtil;
 
 /** 
  * Causes a bunch of non-aborting and aborting exceptions and checks that
@@ -63,6 +64,7 @@ public class TestIndexWriterExceptions2
     if (dir instanceof MockDirectoryWrapper) {
       ((MockDirectoryWrapper)dir).setThrottling(MockDirectoryWrapper.Throttling.NEVER);
       ((MockDirectoryWrapper)dir).setUseSlowOpenClosers(false);
+      ((MockDirectoryWrapper)dir).setPreventDoubleWrite(false);
     }
     
     // log all exceptions we hit, in case we fail (for debugging)
@@ -96,11 +98,12 @@ public class TestIndexWriterExceptions2
     // just for now, try to keep this test reproducible
     conf.setMergeScheduler(new SerialMergeScheduler());
     conf.setCodec(codec);
-    
+
     int numDocs = atLeast(500);
     
     IndexWriter iw = new IndexWriter(dir, conf);
     try {
+      boolean allowAlreadyClosed = false;
       for (int i = 0; i < numDocs; i++) {
         // TODO: add crankyDocValuesFields, etc
         Document doc = new Document();
@@ -136,10 +139,21 @@ public class TestIndexWriterExceptions2
             } else if (thingToDo == 2) {
               iw.updateBinaryDocValue(new Term("id", Integer.toString(i)), "dv2", new BytesRef(Integer.toString(i+1)));
             }
+          } catch (AlreadyClosedException ace) {
+            // OK: writer was closed by abort; we just reopen now:
+            assertTrue(iw.deleter.isClosed());
+            assertTrue(allowAlreadyClosed);
+            allowAlreadyClosed = false;
+            conf = newIndexWriterConfig(analyzer);
+            // just for now, try to keep this test reproducible
+            conf.setMergeScheduler(new SerialMergeScheduler());
+            conf.setCodec(codec);
+            iw = new IndexWriter(dir, conf);
           } catch (Exception e) {
             if (e.getMessage() != null && e.getMessage().startsWith("Fake IOException")) {
               exceptionStream.println("\nTEST: got expected fake exc:" + e.getMessage());
               e.printStackTrace(exceptionStream);
+              allowAlreadyClosed = true;
             } else {
               Rethrow.rethrow(e);
             }
@@ -159,10 +173,21 @@ public class TestIndexWriterExceptions2
             if (random().nextBoolean()) {
               iw.deleteDocuments(new Term("id", Integer.toString(i)), new Term("id", Integer.toString(-i)));
             }
+          } catch (AlreadyClosedException ace) {
+            // OK: writer was closed by abort; we just reopen now:
+            assertTrue(iw.deleter.isClosed());
+            assertTrue(allowAlreadyClosed);
+            allowAlreadyClosed = false;
+            conf = newIndexWriterConfig(analyzer);
+            // just for now, try to keep this test reproducible
+            conf.setMergeScheduler(new SerialMergeScheduler());
+            conf.setCodec(codec);
+            iw = new IndexWriter(dir, conf);
           } catch (Exception e) {
             if (e.getMessage() != null && e.getMessage().startsWith("Fake IOException")) {
               exceptionStream.println("\nTEST: got expected fake exc:" + e.getMessage());
               e.printStackTrace(exceptionStream);
+              allowAlreadyClosed = true;
             } else {
               Rethrow.rethrow(e);
             }
@@ -186,10 +211,21 @@ public class TestIndexWriterExceptions2
             if (DirectoryReader.indexExists(dir)) {
               TestUtil.checkIndex(dir);
             }
+          } catch (AlreadyClosedException ace) {
+            // OK: writer was closed by abort; we just reopen now:
+            assertTrue(iw.deleter.isClosed());
+            assertTrue(allowAlreadyClosed);
+            allowAlreadyClosed = false;
+            conf = newIndexWriterConfig(analyzer);
+            // just for now, try to keep this test reproducible
+            conf.setMergeScheduler(new SerialMergeScheduler());
+            conf.setCodec(codec);
+            iw = new IndexWriter(dir, conf);
           } catch (Exception e) {
             if (e.getMessage() != null && e.getMessage().startsWith("Fake IOException")) {
               exceptionStream.println("\nTEST: got expected fake exc:" + e.getMessage());
               e.printStackTrace(exceptionStream);
+              allowAlreadyClosed = true;
             } else {
               Rethrow.rethrow(e);
             }

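The three identical catch blocks above encode one recovery rule: an injected "Fake IOException" licenses exactly one subsequent AlreadyClosedException, after which the test rebuilds the writer. Factored out as a sketch (hypothetical names; IllegalStateException stands in for AlreadyClosedException):

    import java.util.function.Supplier;

    class ReopenRuleSketch<W> {
      interface Step<T> {
        void run(T writer) throws Exception;
      }

      private boolean allowAlreadyClosed = false;

      W apply(W writer, Step<W> step, Supplier<W> reopen) {
        try {
          step.run(writer);
        } catch (IllegalStateException ace) {  // "already closed"
          if (!allowAlreadyClosed) {
            throw ace;                         // only legal right after an expected failure
          }
          allowAlreadyClosed = false;
          writer = reopen.get();               // the abort closed the writer; start fresh
        } catch (Exception e) {
          if (e.getMessage() != null && e.getMessage().startsWith("Fake IOException")) {
            allowAlreadyClosed = true;         // the next call may find the writer closed
          } else {
            throw new RuntimeException(e);
          }
        }
        return writer;
      }
    }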
Modified: lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java?rev=1643845&r1=1643844&r2=1643845&view=diff
==============================================================================
--- lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java (original)
+++ lucene/dev/branches/lucene2878/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java Mon Dec  8 15:41:02 2014
@@ -57,6 +57,7 @@ public class TestIndexWriterOnDiskFull e
       }
       boolean doAbort = pass == 1;
       long diskFree = TestUtil.nextInt(random(), 100, 300);
+      boolean indexExists = false;
       while(true) {
         if (VERBOSE) {
           System.out.println("TEST: cycle: diskFree=" + diskFree);
@@ -82,6 +83,7 @@ public class TestIndexWriterOnDiskFull e
             System.out.println("TEST: done adding docs; now commit");
           }
           writer.commit();
+          indexExists = true;
         } catch (IOException e) {
           if (VERBOSE) {
             System.out.println("TEST: exception on addDoc");
@@ -118,9 +120,7 @@ public class TestIndexWriterOnDiskFull e
 
           //_TestUtil.syncConcurrentMerges(ms);
 
-          if (TestUtil.anyFilesExceptWriteLock(dir)) {
-            assertNoUnreferencedFiles(dir, "after disk full during addDocument");
-            
+          if (indexExists) {
             // Make sure reader can open the index:
             DirectoryReader.open(dir).close();
           }
@@ -548,25 +548,9 @@ public class TestIndexWriterOnDiskFull e
       writer.addDocument(doc);
       fail("did not hit disk full");
     } catch (IOException ioe) {
+      assertTrue(writer.deleter.isClosed());
+      assertTrue(writer.isClosed());
     }
-    // Without fix for LUCENE-1130: this call will hang:
-    try {
-      writer.addDocument(doc);
-      fail("did not hit disk full");
-    } catch (IOException ioe) {
-    }
-    try {
-      writer.commit();
-      fail("did not hit disk full");
-    } catch (IOException ioe) {
-    } finally {
-      writer.close();
-    }
-
-    // Make sure once disk space is avail again, we can
-    // cleanly close:
-    dir.setMaxSizeInBytes(0);
-    writer.close();
     dir.close();
   }