Posted to commits@lucene.apache.org by sh...@apache.org on 2013/10/16 09:43:18 UTC

svn commit: r1532670 [2/2] - in /lucene/dev/trunk/lucene/core/src: java/org/apache/lucene/index/ test/org/apache/lucene/index/

Modified: lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java?rev=1532670&r1=1532669&r2=1532670&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java (original)
+++ lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java Wed Oct 16 07:43:18 2013
@@ -28,8 +28,13 @@ import java.util.List;
 import java.util.Random;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import org.apache.lucene.analysis.*;
-import org.apache.lucene.codecs.PostingsFormat;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.CannedTokenStream;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.Token;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
@@ -49,6 +54,7 @@ import org.apache.lucene.store.IOContext
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.MockDirectoryWrapper.FakeIOException;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
@@ -1685,7 +1691,226 @@ public class TestIndexWriterExceptions e
 
     dir.close();
   }
-  
-  
+
+  // Make sure that if we hit a transient IOException (e.g., disk
+  // full), and then the exception stops (e.g., disk frees
+  // up), we can still successfully close IW or open an NRT
+  // reader without losing any deletes or updates:
+  public void testNoLostDeletesOrUpdates() throws Exception {
+    int deleteCount = 0;
+    int docBase = 0;
+    int docCount = 0;
+
+    MockDirectoryWrapper dir = newMockDirectory();
+    final AtomicBoolean shouldFail = new AtomicBoolean();
+    dir.failOn(new MockDirectoryWrapper.Failure() {
+      
+      @Override
+      public void eval(MockDirectoryWrapper dir) throws IOException {
+        StackTraceElement[] trace = new Exception().getStackTrace();
+        if (shouldFail.get() == false) {
+          return;
+        }
+        
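+        // Walk the current stack: we only want to fail while
+        // live docs or DV field updates are being written, not
+        // while a flushed segment is being sealed: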
+        boolean sawSeal = false;
+        boolean sawWrite = false;
+        for (int i = 0; i < trace.length; i++) {
+          if ("sealFlushedSegment".equals(trace[i].getMethodName())) {
+            sawSeal = true;
+            break;
+          }
+          if ("writeLiveDocs".equals(trace[i].getMethodName()) || "writeFieldUpdates".equals(trace[i].getMethodName())) {
+            sawWrite = true;
+          }
+        }
+        
+        // Don't throw exc if we are "flushing", else
+        // the segment is aborted and docs are lost:
+        if (sawWrite && sawSeal == false && random().nextInt(3) == 2) {
+          // Only sometimes throw the exc, so that it sometimes
+          // happens when creating the file, when flushing the
+          // buffer, or when closing the file:
+          if (VERBOSE) {
+            System.out.println("TEST: now fail; thread=" + Thread.currentThread().getName() + " exc:");
+            new Throwable().printStackTrace(System.out);
+          }
+          shouldFail.set(false);
+          throw new FakeIOException();
+        }
+      }
+    });
+    
+    RandomIndexWriter w = null;
+
+    for(int iter=0;iter<10*RANDOM_MULTIPLIER;iter++) {
+      int numDocs = atLeast(100);
+      if (VERBOSE) {
+        System.out.println("\nTEST: iter=" + iter + " numDocs=" + numDocs + " docBase=" + docBase + " delCount=" + deleteCount);
+      }
+      if (w == null) {
+        IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+        final MergeScheduler ms = iwc.getMergeScheduler();
+        if (ms instanceof ConcurrentMergeScheduler) {
+          final ConcurrentMergeScheduler suppressFakeIOE = new ConcurrentMergeScheduler() {
+              @Override
+              protected void handleMergeException(Throwable exc) {
+                // suppress only FakeIOException:
+                if (!(exc instanceof FakeIOException)) {
+                  super.handleMergeException(exc);
+                }
+              }
+            };
+          final ConcurrentMergeScheduler cms = (ConcurrentMergeScheduler) ms;
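+          // carry over the original scheduler's merge/thread settings
+          // so that only the exception handling differs: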
+          suppressFakeIOE.setMaxMergesAndThreads(cms.getMaxMergeCount(), cms.getMaxThreadCount());
+          suppressFakeIOE.setMergeThreadPriority(cms.getMergeThreadPriority());
+          iwc.setMergeScheduler(suppressFakeIOE);
+        }
+        
+        w = new RandomIndexWriter(random(), dir, iwc);
+        // Since we hit exc during merging, a partial
+        // forceMerge can easily return when there are still
+        // too many segments in the index:
+        w.setDoRandomForceMergeAssert(false);
+      }
+      for(int i=0;i<numDocs;i++) {
+        Document doc = new Document();
+        doc.add(new StringField("id", ""+(docBase+i), Field.Store.NO));
+        doc.add(new NumericDocValuesField("f", 1L));
+        doc.add(new NumericDocValuesField("cf", 2L));
+        w.addDocument(doc);
+      }
+      docCount += numDocs;
+
+      // TODO: we could make the test more evil, by letting
+      // it throw more than one exc, randomly, before "recovering"
+
+      // TODO: we could also install an infoStream and try
+      // to fail in "more evil" places inside BDS
+
+      shouldFail.set(true);
+      boolean doClose = false;
+
+      try {
+
+        boolean defaultCodecSupportsFieldUpdates = defaultCodecSupportsFieldUpdates();
+        for(int i=0;i<numDocs;i++) {
+          if (random().nextInt(10) == 7) {
+            boolean fieldUpdate = defaultCodecSupportsFieldUpdates && random().nextBoolean();
+            if (fieldUpdate) {
+              long value = iter;
+              if (VERBOSE) {
+                System.out.println("  update id=" + (docBase+i) + " to value " + value);
+              }
+              w.updateNumericDocValue(new Term("id", Integer.toString(docBase + i)), "f", value);
+              w.updateNumericDocValue(new Term("id", Integer.toString(docBase + i)), "cf", value * 2);
+            }
+            
+            // sometimes do both deletes and updates
+            if (!fieldUpdate || random().nextBoolean()) {
+              if (VERBOSE) {
+                System.out.println("  delete id=" + (docBase+i));
+              }
+              deleteCount++;
+              w.deleteDocuments(new Term("id", ""+(docBase+i)));
+            }
+          }
+        }
+
+        // Trigger writeLiveDocs so we hit fake exc:
+        IndexReader r = w.getReader(true);
+
+        // Sometimes we will make it here (we only randomly
+        // throw the exc):
+        assertEquals(docCount-deleteCount, r.numDocs());
+        r.close();
+        
+        // Sometimes close, so the disk full happens on close:
+        if (random().nextBoolean()) {
+          if (VERBOSE) {
+            System.out.println("  now close writer");
+          }
+          doClose = true;
+          w.close();
+          w = null;
+        }
+
+      } catch (IOException ioe) {
+        // FakeIOException can be thrown from mergeMiddle, in which case IW
+        // registers it before our CMS gets to suppress it. IW.forceMerge later
+        // throws it as a wrapped IOE, so don't fail in this case.
+        if (ioe instanceof FakeIOException || (ioe.getCause() != null && ioe.getCause() instanceof FakeIOException)) {
+          // expected
+          if (VERBOSE) {
+            System.out.println("TEST: w.close() hit expected IOE");
+          }
+        } else {
+          throw ioe;
+        }
+      }
+      shouldFail.set(false);
+
+      IndexReader r;
+
+      if (doClose && w != null) {
+        if (VERBOSE) {
+          System.out.println("  now 2nd close writer");
+        }
+        w.close();
+        w = null;
+      }
+
+      if (w == null || random().nextBoolean()) {
+        // Open non-NRT reader, to make sure the "on
+        // disk" bits are good:
+        if (VERBOSE) {
+          System.out.println("TEST: verify against non-NRT reader");
+        }
+        if (w != null) {
+          w.commit();
+        }
+        r = DirectoryReader.open(dir);
+      } else {
+        if (VERBOSE) {
+          System.out.println("TEST: verify against NRT reader");
+        }
+        r = w.getReader();
+      }
+      assertEquals(docCount-deleteCount, r.numDocs());
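+      // Whether a doc kept its original values (1, 2) or a later
+      // applied update (iter, iter*2), cf must always equal f * 2: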
+      for (AtomicReaderContext context : r.leaves()) {
+        Bits liveDocs = context.reader().getLiveDocs();
+        NumericDocValues f = context.reader().getNumericDocValues("f");
+        NumericDocValues cf = context.reader().getNumericDocValues("cf");
+        for (int i = 0; i < context.reader().maxDoc(); i++) {
+          if (liveDocs == null || liveDocs.get(i)) {
+            assertEquals("doc=" + (docBase + i), cf.get(i), f.get(i) * 2);
+          }
+        }
+      }
+
+      r.close();
+
+      // Sometimes re-use RIW, other times open new one:
+      if (w != null && random().nextBoolean()) {
+        if (VERBOSE) {
+          System.out.println("TEST: close writer");
+        }
+        w.close();
+        w = null;
+      }
+
+      docBase += numDocs;
+    }
+
+    if (w != null) {
+      w.close();
+    }
+
+    // Final verify:
+    IndexReader r = DirectoryReader.open(dir);
+    assertEquals(docCount-deleteCount, r.numDocs());
+    r.close();
+
+    dir.close();
+  }
 
 }

Modified: lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/index/TestNumericDocValuesUpdates.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/index/TestNumericDocValuesUpdates.java?rev=1532670&r1=1532669&r2=1532670&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/index/TestNumericDocValuesUpdates.java (original)
+++ lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/index/TestNumericDocValuesUpdates.java Wed Oct 16 07:43:18 2013
@@ -747,7 +747,7 @@ public class TestNumericDocValuesUpdates
         Document doc = new Document();
         doc.add(new StringField("id", "doc-" + docID, Store.NO));
         doc.add(new StringField("key", "all", Store.NO)); // update key
-        // add all fields with their current (updated value)
+        // add all fields with their current value
         for (int f = 0; f < fieldValues.length; f++) {
           doc.add(new NumericDocValuesField("f" + f, fieldValues[f]));
         }
@@ -804,7 +804,7 @@ public class TestNumericDocValuesUpdates
           int maxDoc = r.maxDoc();
           for (int doc = 0; doc < maxDoc; doc++) {
             if (liveDocs == null || liveDocs.get(doc)) {
-              //              System.out.println("doc=" + (doc + context.docBase) + " f='" + f + "' vslue=" + ndv.get(doc));
+//              System.out.println("doc=" + (doc + context.docBase) + " f='" + f + "' value=" + ndv.get(doc));
               if (fieldHasValue[field]) {
                 assertTrue(docsWithField.get(doc));
                 assertEquals("invalid value for doc=" + doc + ", field=" + f + ", reader=" + r, fieldValues[field], ndv.get(doc));
@@ -1282,6 +1282,75 @@ public class TestNumericDocValuesUpdates
     writer.close();
     dir.close();
   }
+
+  @Test
+  public void testTonsOfUpdates() throws Exception {
+    // LUCENE-5248: make sure that when there are many updates, we don't use too much RAM
+    Directory dir = newDirectory();
+    final Random random = random();
+    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+    conf.setRAMBufferSizeMB(IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB);
+    conf.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH); // don't flush by doc
+    IndexWriter writer = new IndexWriter(dir, conf);
+    
+    // test data: lots of documents (a few tens of thousands) and lots of update terms (a few hundred)
+    final int numDocs = atLeast(20000);
+    final int numNumericFields = atLeast(5);
+    final int numTerms = _TestUtil.nextInt(random, 10, 100); // terms should affect many docs
+    Set<String> updateTerms = new HashSet<String>();
+    while (updateTerms.size() < numTerms) {
+      updateTerms.add(_TestUtil.randomSimpleString(random));
+    }
+
+//    System.out.println("numDocs=" + numDocs + " numNumericFields=" + numNumericFields + " numTerms=" + numTerms);
+    
+    // build a large index with many NDV fields and update terms
+    for (int i = 0; i < numDocs; i++) {
+      Document doc = new Document();
+      int numUpdateTerms = _TestUtil.nextInt(random, 1, numTerms / 10);
+      for (int j = 0; j < numUpdateTerms; j++) {
+        doc.add(new StringField("upd", RandomPicks.randomFrom(random, updateTerms), Store.NO));
+      }
+      for (int j = 0; j < numNumericFields; j++) {
+        long val = random.nextInt();
+        doc.add(new NumericDocValuesField("f" + j, val));
+        doc.add(new NumericDocValuesField("cf" + j, val * 2));
+      }
+      writer.addDocument(doc);
+    }
+    
+    writer.commit(); // commit so there's something to apply to
+    
+    // set to flush every 2048 bytes (approximately every 12 updates), so we get
+    // many flushes during numeric updates
+    writer.getConfig().setRAMBufferSizeMB(2048.0 / 1024 / 1024);
+    final int numUpdates = atLeast(100);
+//    System.out.println("numUpdates=" + numUpdates);
+    for (int i = 0; i < numUpdates; i++) {
+      int field = random.nextInt(numNumericFields);
+      Term updateTerm = new Term("upd", RandomPicks.randomFrom(random, updateTerms));
+      long value = random.nextInt();
+      writer.updateNumericDocValue(updateTerm, "f" + field, value);
+      writer.updateNumericDocValue(updateTerm, "cf" + field, value * 2);
+    }
+
+    writer.close();
+    
+    DirectoryReader reader = DirectoryReader.open(dir);
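+    // every add and every update wrote cf = 2*f, so that invariant must hold for all docs: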
+    for (AtomicReaderContext context : reader.leaves()) {
+      for (int i = 0; i < numNumericFields; i++) {
+        AtomicReader r = context.reader();
+        NumericDocValues f = r.getNumericDocValues("f" + i);
+        NumericDocValues cf = r.getNumericDocValues("cf" + i);
+        for (int j = 0; j < r.maxDoc(); j++) {
+          assertEquals("reader=" + r + ", field=f" + i + ", doc=" + j, cf.get(j), f.get(j) * 2);
+        }
+      }
+    }
+    reader.close();
+    
+    dir.close();
+  }
   
   @Test
   public void testUpdatesOrder() throws Exception {
@@ -1310,4 +1379,52 @@ public class TestNumericDocValuesUpdates
     dir.close();
   }
   
+  @Test
+  public void testUpdateAllDeletedSegment() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+    IndexWriter writer = new IndexWriter(dir, conf);
+    
+    Document doc = new Document();
+    doc.add(new StringField("id", "doc", Store.NO));
+    doc.add(new NumericDocValuesField("f1", 1L));
+    writer.addDocument(doc);
+    writer.addDocument(doc);
+    writer.commit();
+    writer.deleteDocuments(new Term("id", "doc")); // delete all docs in the first segment
+    writer.addDocument(doc);
+    writer.updateNumericDocValue(new Term("id", "doc"), "f1", 2L);
+    writer.close();
+    
+    DirectoryReader reader = DirectoryReader.open(dir);
+    assertEquals(1, reader.leaves().size());
+    assertEquals(2L, reader.leaves().get(0).reader().getNumericDocValues("f1").get(0));
+    reader.close();
+    
+    dir.close();
+  }
+
+  @Test
+  public void testUpdateTwoNonexistingTerms() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+    IndexWriter writer = new IndexWriter(dir, conf);
+    
+    Document doc = new Document();
+    doc.add(new StringField("id", "doc", Store.NO));
+    doc.add(new NumericDocValuesField("f1", 1L));
+    writer.addDocument(doc);
+    // update with multiple nonexistent terms in the same field
+    writer.updateNumericDocValue(new Term("c", "foo"), "f1", 2L);
+    writer.updateNumericDocValue(new Term("c", "bar"), "f1", 2L);
+    writer.close();
+    
+    DirectoryReader reader = DirectoryReader.open(dir);
+    assertEquals(1, reader.leaves().size());
+    assertEquals(1L, reader.leaves().get(0).reader().getNumericDocValues("f1").get(0));
+    reader.close();
+    
+    dir.close();
+  }
+  
 }