Posted to commits@lucene.apache.org by mi...@apache.org on 2015/02/01 10:27:35 UTC

svn commit: r1656273 [1/2] - in /lucene/dev/branches/branch_5x: lucene/ lucene/analysis/uima/src/test/org/apache/lucene/analysis/uima/ lucene/classification/src/test/org/apache/lucene/classification/ lucene/classification/src/test/org/apache/lucene/cla...

Author: mikemccand
Date: Sun Feb  1 09:27:34 2015
New Revision: 1656273

URL: http://svn.apache.org/r1656273
Log:
LUCENE-6212: remove per-doc analyzers

Modified:
    lucene/dev/branches/branch_5x/lucene/CHANGES.txt
    lucene/dev/branches/branch_5x/lucene/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMABaseAnalyzerTest.java
    lucene/dev/branches/branch_5x/lucene/classification/src/test/org/apache/lucene/classification/ClassificationTestBase.java
    lucene/dev/branches/branch_5x/lucene/classification/src/test/org/apache/lucene/classification/utils/DataSplitterTest.java
    lucene/dev/branches/branch_5x/lucene/classification/src/test/org/apache/lucene/classification/utils/DocToDoubleVectorUtilsTest.java
    lucene/dev/branches/branch_5x/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
    lucene/dev/branches/branch_5x/lucene/core/src/java/org/apache/lucene/index/TrackingIndexWriter.java
    lucene/dev/branches/branch_5x/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java
    lucene/dev/branches/branch_5x/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
    lucene/dev/branches/branch_5x/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java
    lucene/dev/branches/branch_5x/lucene/core/src/test/org/apache/lucene/search/TestControlledRealTimeReopenThread.java
    lucene/dev/branches/branch_5x/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
    lucene/dev/branches/branch_5x/lucene/misc/src/java/org/apache/lucene/index/SortingMergePolicy.java
    lucene/dev/branches/branch_5x/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java
    lucene/dev/branches/branch_5x/solr/core/src/java/org/apache/solr/core/SolrCore.java
    lucene/dev/branches/branch_5x/solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java
    lucene/dev/branches/branch_5x/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
    lucene/dev/branches/branch_5x/solr/core/src/java/org/apache/solr/update/SolrIndexConfig.java
    lucene/dev/branches/branch_5x/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java
    lucene/dev/branches/branch_5x/solr/core/src/java/org/apache/solr/update/SolrIndexWriter.java
    lucene/dev/branches/branch_5x/solr/core/src/test/org/apache/solr/core/TestConfig.java
    lucene/dev/branches/branch_5x/solr/core/src/test/org/apache/solr/core/TestInfoStreamLogging.java
    lucene/dev/branches/branch_5x/solr/core/src/test/org/apache/solr/core/TestMergePolicyConfig.java
    lucene/dev/branches/branch_5x/solr/core/src/test/org/apache/solr/core/TestSolrIndexConfig.java
    lucene/dev/branches/branch_5x/solr/core/src/test/org/apache/solr/update/SolrIndexConfigTest.java

Modified: lucene/dev/branches/branch_5x/lucene/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/CHANGES.txt?rev=1656273&r1=1656272&r2=1656273&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/CHANGES.txt (original)
+++ lucene/dev/branches/branch_5x/lucene/CHANGES.txt Sun Feb  1 09:27:34 2015
@@ -366,6 +366,11 @@ API Changes
   Weight.scoresDocsOutOfOrder and LeafCollector.acceptsDocsOutOfOrder have been
   removed and boolean queries now always score in order.
 
+* LUCENE-6212: IndexWriter no longer accepts per-document Analyzer to
+  add/updateDocument.  These methods were trappy as they made it
+  easy to accidentally index tokens that were not easily
+  searchable. (Mike McCandless)
+
 Bug Fixes
 
 * LUCENE-5650: Enforce read-only access to any path outside the temporary

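The removed overloads have a direct migration path: the analyzer moves off the addDocument/updateDocument call and onto IndexWriterConfig, fixed once at writer construction. Per-field variation is still available through PerFieldAnalyzerWrapper; only the per-document argument is gone. A minimal sketch against the 5.x API (the class name, field names, and index path are illustrative, not part of this commit):

    import java.nio.file.Paths;
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.core.KeywordAnalyzer;
    import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.TextField;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    public class MigrationSketch {
      public static void main(String[] args) throws Exception {
        // Per-field analysis: "id" is kept verbatim, everything else goes
        // through StandardAnalyzer.
        Map<String,Analyzer> perField = new HashMap<>();
        perField.put("id", new KeywordAnalyzer());
        Analyzer analyzer = new PerFieldAnalyzerWrapper(new StandardAnalyzer(), perField);

        try (Directory dir = FSDirectory.open(Paths.get("/tmp/demo-index"));
             IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(analyzer))) {
          Document doc = new Document();
          doc.add(new TextField("contents", "there is some content written here", Field.Store.YES));
          // Before this commit: writer.addDocument(doc, analyzer);
          writer.addDocument(doc);  // always analyzed with the writer's analyzer
          writer.commit();
        }
      }
    }
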
Modified: lucene/dev/branches/branch_5x/lucene/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMABaseAnalyzerTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMABaseAnalyzerTest.java?rev=1656273&r1=1656272&r2=1656273&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMABaseAnalyzerTest.java (original)
+++ lucene/dev/branches/branch_5x/lucene/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMABaseAnalyzerTest.java Sun Feb  1 09:27:34 2015
@@ -74,7 +74,7 @@ public class UIMABaseAnalyzerTest extend
     doc.add(new TextField("title", dummyTitle, Field.Store.YES));
     String dummyContent = "there is some content written here";
     doc.add(new TextField("contents", dummyContent, Field.Store.YES));
-    writer.addDocument(doc, analyzer);
+    writer.addDocument(doc);
     writer.commit();
 
     // try the search over the first doc
@@ -95,7 +95,7 @@ public class UIMABaseAnalyzerTest extend
     doc.add(new TextField("title", dogmasTitle, Field.Store.YES));
     String dogmasContents = "white men can't jump";
     doc.add(new TextField("contents", dogmasContents, Field.Store.YES));
-    writer.addDocument(doc, analyzer);
+    writer.addDocument(doc);
     writer.commit();
 
     directoryReader.close();

Modified: lucene/dev/branches/branch_5x/lucene/classification/src/test/org/apache/lucene/classification/ClassificationTestBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/classification/src/test/org/apache/lucene/classification/ClassificationTestBase.java?rev=1656273&r1=1656272&r2=1656273&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/classification/src/test/org/apache/lucene/classification/ClassificationTestBase.java (original)
+++ lucene/dev/branches/branch_5x/lucene/classification/src/test/org/apache/lucene/classification/ClassificationTestBase.java Sun Feb  1 09:27:34 2015
@@ -16,11 +16,15 @@
  */
 package org.apache.lucene.classification;
 
+import java.io.IOException;
+import java.util.Random;
+
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.SlowCompositeReaderWrapper;
@@ -32,9 +36,6 @@ import org.apache.lucene.util.TestUtil;
 import org.junit.After;
 import org.junit.Before;
 
-import java.io.IOException;
-import java.util.Random;
-
 /**
  * Base class for testing {@link Classifier}s
  */
@@ -111,7 +112,7 @@ public abstract class ClassificationTest
       assertNotNull(classificationResult.getAssignedClass());
       assertEquals("got an assigned class of " + classificationResult.getAssignedClass(), expectedResult, classificationResult.getAssignedClass());
       assertTrue("got a not positive score " + classificationResult.getScore(), classificationResult.getScore() > 0);
-      updateSampleIndex(analyzer);
+      updateSampleIndex();
       ClassificationResult<T> secondClassificationResult = classifier.assignClass(inputDoc);
       assertEquals(classificationResult.getAssignedClass(), secondClassificationResult.getAssignedClass());
       assertEquals(Double.valueOf(classificationResult.getScore()), Double.valueOf(secondClassificationResult.getScore()));
@@ -123,7 +124,8 @@ public abstract class ClassificationTest
   }
 
   private void populateSampleIndex(Analyzer analyzer) throws IOException {
-    indexWriter.deleteAll();
+    indexWriter.close();
+    indexWriter = new RandomIndexWriter(random(), dir, newIndexWriterConfig(analyzer).setOpenMode(IndexWriterConfig.OpenMode.CREATE));
     indexWriter.commit();
 
     String text;
@@ -136,7 +138,7 @@ public abstract class ClassificationTest
     doc.add(new Field(categoryFieldName, "politics", ft));
     doc.add(new Field(booleanFieldName, "true", ft));
 
-    indexWriter.addDocument(doc, analyzer);
+    indexWriter.addDocument(doc);
 
     doc = new Document();
     text = "Mitt Romney seeks to assure Israel and Iran, as well as Jewish voters in the United" +
@@ -144,7 +146,7 @@ public abstract class ClassificationTest
     doc.add(new Field(textFieldName, text, ft));
     doc.add(new Field(categoryFieldName, "politics", ft));
     doc.add(new Field(booleanFieldName, "true", ft));
-    indexWriter.addDocument(doc, analyzer);
+    indexWriter.addDocument(doc);
 
     doc = new Document();
     text = "And there's a threshold question that he has to answer for the American people and " +
@@ -153,7 +155,7 @@ public abstract class ClassificationTest
     doc.add(new Field(textFieldName, text, ft));
     doc.add(new Field(categoryFieldName, "politics", ft));
     doc.add(new Field(booleanFieldName, "true", ft));
-    indexWriter.addDocument(doc, analyzer);
+    indexWriter.addDocument(doc);
 
     doc = new Document();
     text = "Still, when it comes to gun policy, many congressional Democrats have \"decided to " +
@@ -162,7 +164,7 @@ public abstract class ClassificationTest
     doc.add(new Field(textFieldName, text, ft));
     doc.add(new Field(categoryFieldName, "politics", ft));
     doc.add(new Field(booleanFieldName, "true", ft));
-    indexWriter.addDocument(doc, analyzer);
+    indexWriter.addDocument(doc);
 
     doc = new Document();
     text = "Standing amongst the thousands of people at the state Capitol, Jorstad, director of " +
@@ -171,7 +173,7 @@ public abstract class ClassificationTest
     doc.add(new Field(textFieldName, text, ft));
     doc.add(new Field(categoryFieldName, "technology", ft));
     doc.add(new Field(booleanFieldName, "false", ft));
-    indexWriter.addDocument(doc, analyzer);
+    indexWriter.addDocument(doc);
 
     doc = new Document();
     text = "So, about all those experts and analysts who've spent the past year or so saying " +
@@ -179,7 +181,7 @@ public abstract class ClassificationTest
     doc.add(new Field(textFieldName, text, ft));
     doc.add(new Field(categoryFieldName, "technology", ft));
     doc.add(new Field(booleanFieldName, "false", ft));
-    indexWriter.addDocument(doc, analyzer);
+    indexWriter.addDocument(doc);
 
     doc = new Document();
     text = "More than 400 million people trust Google with their e-mail, and 50 million store files" +
@@ -188,12 +190,12 @@ public abstract class ClassificationTest
     doc.add(new Field(textFieldName, text, ft));
     doc.add(new Field(categoryFieldName, "technology", ft));
     doc.add(new Field(booleanFieldName, "false", ft));
-    indexWriter.addDocument(doc, analyzer);
+    indexWriter.addDocument(doc);
 
     doc = new Document();
     text = "unlabeled doc";
     doc.add(new Field(textFieldName, text, ft));
-    indexWriter.addDocument(doc, analyzer);
+    indexWriter.addDocument(doc);
 
     indexWriter.commit();
   }
@@ -215,7 +217,8 @@ public abstract class ClassificationTest
   }
 
   private void populatePerformanceIndex(Analyzer analyzer) throws IOException {
-    indexWriter.deleteAll();
+    indexWriter.close();
+    indexWriter = new RandomIndexWriter(random(), dir, newIndexWriterConfig(analyzer).setOpenMode(IndexWriterConfig.OpenMode.CREATE));
     indexWriter.commit();
 
     FieldType ft = new FieldType(TextField.TYPE_STORED);
@@ -230,7 +233,7 @@ public abstract class ClassificationTest
       doc.add(new Field(textFieldName, createRandomString(random), ft));
       doc.add(new Field(categoryFieldName, b ? "technology" : "politics", ft));
       doc.add(new Field(booleanFieldName, String.valueOf(b), ft));
-      indexWriter.addDocument(doc, analyzer);
+      indexWriter.addDocument(doc);
     }
     indexWriter.commit();
   }
@@ -244,7 +247,7 @@ public abstract class ClassificationTest
     return builder.toString();
   }
 
-  private void updateSampleIndex(Analyzer analyzer) throws Exception {
+  private void updateSampleIndex() throws Exception {
 
     String text;
 
@@ -254,54 +257,54 @@ public abstract class ClassificationTest
     doc.add(new Field(categoryFieldName, "politics", ft));
     doc.add(new Field(booleanFieldName, "true", ft));
 
-    indexWriter.addDocument(doc, analyzer);
+    indexWriter.addDocument(doc);
 
     doc = new Document();
     text = "Julian Zelizer says Bill Clinton is still trying to shape his party, years after the White House, while George W. Bush opts for a much more passive role.";
     doc.add(new Field(textFieldName, text, ft));
     doc.add(new Field(categoryFieldName, "politics", ft));
     doc.add(new Field(booleanFieldName, "true", ft));
-    indexWriter.addDocument(doc, analyzer);
+    indexWriter.addDocument(doc);
 
     doc = new Document();
     text = "Crossfire: Sen. Tim Scott passes on Sen. Lindsey Graham endorsement";
     doc.add(new Field(textFieldName, text, ft));
     doc.add(new Field(categoryFieldName, "politics", ft));
     doc.add(new Field(booleanFieldName, "true", ft));
-    indexWriter.addDocument(doc, analyzer);
+    indexWriter.addDocument(doc);
 
     doc = new Document();
     text = "Illinois becomes 16th state to allow same-sex marriage.";
     doc.add(new Field(textFieldName, text, ft));
     doc.add(new Field(categoryFieldName, "politics", ft));
     doc.add(new Field(booleanFieldName, "true", ft));
-    indexWriter.addDocument(doc, analyzer);
+    indexWriter.addDocument(doc);
 
     doc = new Document();
     text = "Apple is developing iPhones with curved-glass screens and enhanced sensors that detect different levels of pressure, according to a new report.";
     doc.add(new Field(textFieldName, text, ft));
     doc.add(new Field(categoryFieldName, "technology", ft));
     doc.add(new Field(booleanFieldName, "false", ft));
-    indexWriter.addDocument(doc, analyzer);
+    indexWriter.addDocument(doc);
 
     doc = new Document();
     text = "The Xbox One is Microsoft's first new gaming console in eight years. It's a quality piece of hardware but it's also noteworthy because Microsoft is using it to make a statement.";
     doc.add(new Field(textFieldName, text, ft));
     doc.add(new Field(categoryFieldName, "technology", ft));
     doc.add(new Field(booleanFieldName, "false", ft));
-    indexWriter.addDocument(doc, analyzer);
+    indexWriter.addDocument(doc);
 
     doc = new Document();
     text = "Google says it will replace a Google Maps image after a California father complained it shows the body of his teen-age son, who was shot to death in 2009.";
     doc.add(new Field(textFieldName, text, ft));
     doc.add(new Field(categoryFieldName, "technology", ft));
     doc.add(new Field(booleanFieldName, "false", ft));
-    indexWriter.addDocument(doc, analyzer);
+    indexWriter.addDocument(doc);
 
     doc = new Document();
     text = "second unlabeled doc";
     doc.add(new Field(textFieldName, text, ft));
-    indexWriter.addDocument(doc, analyzer);
+    indexWriter.addDocument(doc);
 
     indexWriter.commit();
   }

Modified: lucene/dev/branches/branch_5x/lucene/classification/src/test/org/apache/lucene/classification/utils/DataSplitterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/classification/src/test/org/apache/lucene/classification/utils/DataSplitterTest.java?rev=1656273&r1=1656272&r2=1656273&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/classification/src/test/org/apache/lucene/classification/utils/DataSplitterTest.java (original)
+++ lucene/dev/branches/branch_5x/lucene/classification/src/test/org/apache/lucene/classification/utils/DataSplitterTest.java Sun Feb  1 09:27:34 2015
@@ -64,8 +64,6 @@ public class DataSplitterTest extends Lu
     ft.setStoreTermVectorOffsets(true);
     ft.setStoreTermVectorPositions(true);
 
-    Analyzer analyzer = new MockAnalyzer(random());
-
     Document doc;
     Random rnd = random();
     for (int i = 0; i < 100; i++) {
@@ -73,7 +71,7 @@ public class DataSplitterTest extends Lu
       doc.add(new Field(idFieldName, Integer.toString(i), ft));
       doc.add(new Field(textFieldName, TestUtil.randomUnicodeString(rnd, 1024), ft));
       doc.add(new Field(classFieldName, TestUtil.randomUnicodeString(rnd, 10), ft));
-      indexWriter.addDocument(doc, analyzer);
+      indexWriter.addDocument(doc);
     }
 
     indexWriter.commit();

Modified: lucene/dev/branches/branch_5x/lucene/classification/src/test/org/apache/lucene/classification/utils/DocToDoubleVectorUtilsTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/classification/src/test/org/apache/lucene/classification/utils/DocToDoubleVectorUtilsTest.java?rev=1656273&r1=1656272&r2=1656273&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/classification/src/test/org/apache/lucene/classification/utils/DocToDoubleVectorUtilsTest.java (original)
+++ lucene/dev/branches/branch_5x/lucene/classification/src/test/org/apache/lucene/classification/utils/DocToDoubleVectorUtilsTest.java Sun Feb  1 09:27:34 2015
@@ -55,14 +55,12 @@ public class DocToDoubleVectorUtilsTest
     ft.setStoreTermVectorOffsets(true);
     ft.setStoreTermVectorPositions(true);
 
-    Analyzer analyzer = new MockAnalyzer(random());
-
     Document doc;
     for (int i = 0; i < 10; i++) {
       doc = new Document();
       doc.add(new Field("id", Integer.toString(i), ft));
       doc.add(new Field("text", random().nextInt(10) + " " + random().nextInt(10) + " " + random().nextInt(10), ft));
-      indexWriter.addDocument(doc, analyzer);
+      indexWriter.addDocument(doc);
     }
 
     indexWriter.commit();

Modified: lucene/dev/branches/branch_5x/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java?rev=1656273&r1=1656272&r2=1656273&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java (original)
+++ lucene/dev/branches/branch_5x/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java Sun Feb  1 09:27:34 2015
@@ -1132,22 +1132,7 @@ public class IndexWriter implements Clos
    * @throws IOException if there is a low-level IO error
    */
   public void addDocument(Iterable<? extends IndexableField> doc) throws IOException {
-    addDocument(doc, analyzer);
-  }
-
-  /**
-   * Adds a document to this index, using the provided analyzer instead of the
-   * value of {@link #getAnalyzer()}.
-   *
-   * <p>See {@link #addDocument(Iterable)} for details on
-   * index and IndexWriter state after an Exception, and
-   * flushing/merging temporary free space requirements.</p>
-   *
-   * @throws CorruptIndexException if the index is corrupt
-   * @throws IOException if there is a low-level IO error
-   */
-  public void addDocument(Iterable<? extends IndexableField> doc, Analyzer analyzer) throws IOException {
-    updateDocument(null, doc, analyzer);
+    updateDocument(null, doc);
   }
 
   /**
@@ -1188,22 +1173,7 @@ public class IndexWriter implements Clos
    * @lucene.experimental
    */
   public void addDocuments(Iterable<? extends Iterable<? extends IndexableField>> docs) throws IOException {
-    addDocuments(docs, analyzer);
-  }
-
-  /**
-   * Atomically adds a block of documents, analyzed using the
-   * provided analyzer, with sequentially assigned document
-   * IDs, such that an external reader will see all or none
-   * of the documents. 
-   *
-   * @throws CorruptIndexException if the index is corrupt
-   * @throws IOException if there is a low-level IO error
-   *
-   * @lucene.experimental
-   */
-  public void addDocuments(Iterable<? extends Iterable<? extends IndexableField>> docs, Analyzer analyzer) throws IOException {
-    updateDocuments(null, docs, analyzer);
+    updateDocuments(null, docs);
   }
 
   /**
@@ -1220,24 +1190,6 @@ public class IndexWriter implements Clos
    * @lucene.experimental
    */
   public void updateDocuments(Term delTerm, Iterable<? extends Iterable<? extends IndexableField>> docs) throws IOException {
-    updateDocuments(delTerm, docs, analyzer);
-  }
-
-  /**
-   * Atomically deletes documents matching the provided
-   * delTerm and adds a block of documents, analyzed  using
-   * the provided analyzer, with sequentially
-   * assigned document IDs, such that an external reader
-   * will see all or none of the documents. 
-   *
-   * See {@link #addDocuments(Iterable)}.
-   *
-   * @throws CorruptIndexException if the index is corrupt
-   * @throws IOException if there is a low-level IO error
-   *
-   * @lucene.experimental
-   */
-  public void updateDocuments(Term delTerm, Iterable<? extends Iterable<? extends IndexableField>> docs, Analyzer analyzer) throws IOException {
     ensureOpen();
     try {
       boolean success = false;
@@ -1389,26 +1341,6 @@ public class IndexWriter implements Clos
    */
   public void updateDocument(Term term, Iterable<? extends IndexableField> doc) throws IOException {
     ensureOpen();
-    updateDocument(term, doc, analyzer);
-  }
-
-  /**
-   * Updates a document by first deleting the document(s)
-   * containing <code>term</code> and then adding the new
-   * document.  The delete and then add are atomic as seen
-   * by a reader on the same index (flush may happen only after
-   * the add).
-   *
-   * @param term the term to identify the document(s) to be
-   * deleted
-   * @param doc the document to be added
-   * @param analyzer the analyzer to use when analyzing the document
-   * @throws CorruptIndexException if the index is corrupt
-   * @throws IOException if there is a low-level IO error
-   */
-  public void updateDocument(Term term, Iterable<? extends IndexableField> doc, Analyzer analyzer)
-      throws IOException {
-    ensureOpen();
     try {
       boolean success = false;
       try {

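With the Analyzer-taking overloads gone, code that genuinely needs per-document analysis can pre-analyze the text itself and hand IndexWriter a TokenStream-valued field, which is indexed as-is without consulting the writer's analyzer. A sketch, assuming an already-open IndexWriter named writer (the WhitespaceAnalyzer choice is illustrative):

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.TextField;

    // Analyze with any per-document choice of analyzer, outside IndexWriter:
    Analyzer perDocChoice = new WhitespaceAnalyzer();
    TokenStream pre = perDocChoice.tokenStream("contents", "pre analyzed text");

    Document doc = new Document();
    doc.add(new TextField("contents", pre));  // TokenStream-valued field
    writer.addDocument(doc);                  // writer's analyzer is bypassed
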
Modified: lucene/dev/branches/branch_5x/lucene/core/src/java/org/apache/lucene/index/TrackingIndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/core/src/java/org/apache/lucene/index/TrackingIndexWriter.java?rev=1656273&r1=1656272&r2=1656273&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/core/src/java/org/apache/lucene/index/TrackingIndexWriter.java (original)
+++ lucene/dev/branches/branch_5x/lucene/core/src/java/org/apache/lucene/index/TrackingIndexWriter.java Sun Feb  1 09:27:34 2015
@@ -49,15 +49,6 @@ public class TrackingIndexWriter {
   }
 
   /** Calls {@link
-   *  IndexWriter#updateDocument(Term,Iterable,Analyzer)}
-   *  and returns the generation that reflects this change. */
-  public long updateDocument(Term t, Iterable<? extends IndexableField> d, Analyzer a) throws IOException {
-    writer.updateDocument(t, d, a);
-    // Return gen as of when indexing finished:
-    return indexingGen.get();
-  }
-
-  /** Calls {@link
    *  IndexWriter#updateDocument(Term,Iterable)} and
    *  returns the generation that reflects this change. */
   public long updateDocument(Term t, Iterable<? extends IndexableField> d) throws IOException {
@@ -67,15 +58,6 @@ public class TrackingIndexWriter {
   }
 
   /** Calls {@link
-   *  IndexWriter#updateDocuments(Term,Iterable,Analyzer)}
-   *  and returns the generation that reflects this change. */
-  public long updateDocuments(Term t, Iterable<? extends Iterable<? extends IndexableField>> docs, Analyzer a) throws IOException {
-    writer.updateDocuments(t, docs, a);
-    // Return gen as of when indexing finished:
-    return indexingGen.get();
-  }
-
-  /** Calls {@link
    *  IndexWriter#updateDocuments(Term,Iterable)} and returns
    *  the generation that reflects this change. */
   public long updateDocuments(Term t, Iterable<? extends Iterable<? extends IndexableField>> docs) throws IOException {
@@ -123,24 +105,6 @@ public class TrackingIndexWriter {
     // Return gen as of when indexing finished:
     return indexingGen.get();
   }
-
-  /** Calls {@link
-   *  IndexWriter#addDocument(Iterable,Analyzer)} and
-   *  returns the generation that reflects this change. */
-  public long addDocument(Iterable<? extends IndexableField> d, Analyzer a) throws IOException {
-    writer.addDocument(d, a);
-    // Return gen as of when indexing finished:
-    return indexingGen.get();
-  }
-
-  /** Calls {@link
-   *  IndexWriter#addDocuments(Iterable,Analyzer)} and
-   *  returns the generation that reflects this change.  */
-  public long addDocuments(Iterable<? extends Iterable<? extends IndexableField>> docs, Analyzer a) throws IOException {
-    writer.addDocuments(docs, a);
-    // Return gen as of when indexing finished:
-    return indexingGen.get();
-  }
 
   /** Calls {@link IndexWriter#addDocument(Iterable)}
    *  and returns the generation that reflects this change. */

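The surviving methods still return indexingGen, which is the point of TrackingIndexWriter: the generation lets a caller block until a near-real-time searcher covering its own change is open. A sketch, assuming an open IndexWriter writer, a SearcherManager manager, and a Document doc (the staleness targets are arbitrary):

    import org.apache.lucene.index.Term;
    import org.apache.lucene.index.TrackingIndexWriter;
    import org.apache.lucene.search.ControlledRealTimeReopenThread;
    import org.apache.lucene.search.IndexSearcher;

    TrackingIndexWriter trackingWriter = new TrackingIndexWriter(writer);
    ControlledRealTimeReopenThread<IndexSearcher> reopener =
        new ControlledRealTimeReopenThread<>(trackingWriter, manager, 1.0, 0.01);
    reopener.start();

    long gen = trackingWriter.updateDocument(new Term("id", "42"), doc);
    reopener.waitForGeneration(gen);  // returns once a reader reflecting gen is open
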
Modified: lucene/dev/branches/branch_5x/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java?rev=1656273&r1=1656272&r2=1656273&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java (original)
+++ lucene/dev/branches/branch_5x/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java Sun Feb  1 09:27:34 2015
@@ -305,7 +305,7 @@ public class TestMockAnalyzer extends Ba
       }
     };
 
-    final RandomIndexWriter writer = new RandomIndexWriter(random(), newDirectory());
+    final RandomIndexWriter writer = new RandomIndexWriter(random(), newDirectory(), a);
     final Document doc = new Document();
     final FieldType ft = new FieldType();
     ft.setIndexOptions(IndexOptions.DOCS);
@@ -315,7 +315,7 @@ public class TestMockAnalyzer extends Ba
     ft.setStoreTermVectorOffsets(true);
     doc.add(new Field("f", "a", ft));
     doc.add(new Field("f", "a", ft));
-    writer.addDocument(doc, a);
+    writer.addDocument(doc);
     final LeafReader reader = getOnlySegmentReader(writer.getReader());
     final Fields fields = reader.getTermVectors(0);
     final Terms terms = fields.terms("f");

Modified: lucene/dev/branches/branch_5x/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java?rev=1656273&r1=1656272&r2=1656273&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java (original)
+++ lucene/dev/branches/branch_5x/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java Sun Feb  1 09:27:34 2015
@@ -393,27 +393,35 @@ public class TestIndexWriterExceptions e
   // LUCENE-1208
   public void testExceptionJustBeforeFlush() throws IOException {
     Directory dir = newDirectory();
-    IndexWriter w = RandomIndexWriter.mockIndexWriter(dir, 
-                                                      newIndexWriterConfig(new MockAnalyzer(random()))
-                                                        .setMaxBufferedDocs(2), 
-                                                      new TestPoint1());
-    Document doc = new Document();
-    doc.add(newTextField("field", "a field", Field.Store.YES));
-    w.addDocument(doc);
+
+    final AtomicBoolean doCrash = new AtomicBoolean();
 
     Analyzer analyzer = new Analyzer(Analyzer.PER_FIELD_REUSE_STRATEGY) {
       @Override
       public TokenStreamComponents createComponents(String fieldName) {
         MockTokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
         tokenizer.setEnableChecks(false); // disable workflow checking as we forcefully close() in exceptional cases.
-        return new TokenStreamComponents(tokenizer, new CrashingFilter(fieldName, tokenizer));
+        TokenStream stream = tokenizer;
+        if (doCrash.get()) {
+          stream = new CrashingFilter(fieldName, stream);
+        }
+        return new TokenStreamComponents(tokenizer, stream);
       }
     };
 
+    IndexWriter w = RandomIndexWriter.mockIndexWriter(dir, 
+                                                      newIndexWriterConfig(analyzer)
+                                                        .setMaxBufferedDocs(2), 
+                                                      new TestPoint1());
+    Document doc = new Document();
+    doc.add(newTextField("field", "a field", Field.Store.YES));
+    w.addDocument(doc);
+
     Document crashDoc = new Document();
     crashDoc.add(newTextField("crash", "do it on token 4", Field.Store.YES));
+    doCrash.set(true);
     try {
-      w.addDocument(crashDoc, analyzer);
+      w.addDocument(crashDoc);
       fail("did not hit expected exception");
     } catch (IOException ioe) {
       // expected

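The rewrite above drops the per-document crashing analyzer in favor of one writer-level analyzer whose behavior is toggled from the test via an AtomicBoolean. The same pattern in isolation (FailingFilter is hypothetical; CrashingFilter plays that role in the diff):

    final AtomicBoolean failNow = new AtomicBoolean();
    Analyzer toggling = new Analyzer(Analyzer.PER_FIELD_REUSE_STRATEGY) {
      @Override
      public TokenStreamComponents createComponents(String fieldName) {
        Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
        TokenStream stream = tokenizer;
        if (failNow.get()) {                   // read when components are built
          stream = new FailingFilter(stream);  // hypothetical throwing filter
        }
        return new TokenStreamComponents(tokenizer, stream);
      }
    };
    // index well-formed docs, then:
    failNow.set(true);  // next brand-new field name builds failing components

Because PER_FIELD_REUSE_STRATEGY caches components per field, flipping the flag only affects fields whose components have not been built yet; that is why the test sets doCrash before indexing a document with the previously unseen field "crash".
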
Modified: lucene/dev/branches/branch_5x/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java?rev=1656273&r1=1656272&r2=1656273&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java (original)
+++ lucene/dev/branches/branch_5x/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java Sun Feb  1 09:27:34 2015
@@ -42,521 +42,520 @@ import org.apache.lucene.util.TestUtil;
 
 public class TestPayloads extends LuceneTestCase {
     
-    // Simple tests to test the Payload class
-    public void testPayload() throws Exception {
-        BytesRef payload = new BytesRef("This is a test!");
-        assertEquals("Wrong payload length.", "This is a test!".length(), payload.length);
-        
-        BytesRef clone = payload.clone();
-        assertEquals(payload.length, clone.length);
-        for (int i = 0; i < payload.length; i++) {
-          assertEquals(payload.bytes[i + payload.offset], clone.bytes[i + clone.offset]);
-        }
-        
+  // Simple tests to test the Payload class
+  public void testPayload() throws Exception {
+    BytesRef payload = new BytesRef("This is a test!");
+    assertEquals("Wrong payload length.", "This is a test!".length(), payload.length);
+        
+    BytesRef clone = payload.clone();
+    assertEquals(payload.length, clone.length);
+    for (int i = 0; i < payload.length; i++) {
+      assertEquals(payload.bytes[i + payload.offset], clone.bytes[i + clone.offset]);
     }
+        
+  }
 
-    // Tests whether the DocumentWriter and SegmentMerger correctly enable the
-    // payload bit in the FieldInfo
-    public void testPayloadFieldBit() throws Exception {
-        Directory ram = newDirectory();
-        PayloadAnalyzer analyzer = new PayloadAnalyzer();
-        IndexWriter writer = new IndexWriter(ram, newIndexWriterConfig(analyzer));
-        Document d = new Document();
-        // this field won't have any payloads
-        d.add(newTextField("f1", "This field has no payloads", Field.Store.NO));
-        // this field will have payloads in all docs, however not for all term positions,
-        // so this field is used to check if the DocumentWriter correctly enables the payloads bit
-        // even if only some term positions have payloads
-        d.add(newTextField("f2", "This field has payloads in all docs", Field.Store.NO));
-        d.add(newTextField("f2", "This field has payloads in all docs NO PAYLOAD", Field.Store.NO));
-        // this field is used to verify if the SegmentMerger enables payloads for a field if it has payloads 
-        // enabled in only some documents
-        d.add(newTextField("f3", "This field has payloads in some docs", Field.Store.NO));
-        // only add payload data for field f2
-        analyzer.setPayloadData("f2", "somedata".getBytes(StandardCharsets.UTF_8), 0, 1);
-        writer.addDocument(d);
-        // flush
-        writer.close();
-
-      SegmentReader reader = getOnlySegmentReader(DirectoryReader.open(ram));
-        FieldInfos fi = reader.getFieldInfos();
-        assertFalse("Payload field bit should not be set.", fi.fieldInfo("f1").hasPayloads());
-        assertTrue("Payload field bit should be set.", fi.fieldInfo("f2").hasPayloads());
-        assertFalse("Payload field bit should not be set.", fi.fieldInfo("f3").hasPayloads());
-        reader.close();
-        
-        // now we add another document which has payloads for field f3 and verify if the SegmentMerger
-        // enabled payloads for that field
-        analyzer = new PayloadAnalyzer(); // Clear payload state for each field
-        writer = new IndexWriter(ram, newIndexWriterConfig(analyzer)
-                                        .setOpenMode(OpenMode.CREATE));
-        d = new Document();
-        d.add(newTextField("f1", "This field has no payloads", Field.Store.NO));
-        d.add(newTextField("f2", "This field has payloads in all docs", Field.Store.NO));
-        d.add(newTextField("f2", "This field has payloads in all docs", Field.Store.NO));
-        d.add(newTextField("f3", "This field has payloads in some docs", Field.Store.NO));
-        // add payload data for field f2 and f3
-        analyzer.setPayloadData("f2", "somedata".getBytes(StandardCharsets.UTF_8), 0, 1);
-        analyzer.setPayloadData("f3", "somedata".getBytes(StandardCharsets.UTF_8), 0, 3);
-        writer.addDocument(d);
-
-        // force merge
-        writer.forceMerge(1);
-        // flush
-        writer.close();
-
-      reader = getOnlySegmentReader(DirectoryReader.open(ram));
-        fi = reader.getFieldInfos();
-        assertFalse("Payload field bit should not be set.", fi.fieldInfo("f1").hasPayloads());
-        assertTrue("Payload field bit should be set.", fi.fieldInfo("f2").hasPayloads());
-        assertTrue("Payload field bit should be set.", fi.fieldInfo("f3").hasPayloads());
-        reader.close();
-        ram.close();
-    }
-
-    // Tests if payloads are correctly stored and loaded using both RamDirectory and FSDirectory
-    public void testPayloadsEncoding() throws Exception {
-        Directory dir = newDirectory();
-        performTest(dir);
-        dir.close();
-    }
-    
-    // builds an index with payloads in the given Directory and performs
-    // different tests to verify the payload encoding
-    private void performTest(Directory dir) throws Exception {
-        PayloadAnalyzer analyzer = new PayloadAnalyzer();
-        IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(analyzer)
-            .setOpenMode(OpenMode.CREATE)
-            .setMergePolicy(newLogMergePolicy()));
-        
-        // should be in sync with value in TermInfosWriter
-        final int skipInterval = 16;
-        
-        final int numTerms = 5;
-        final String fieldName = "f1";
-        
-        int numDocs = skipInterval + 1; 
-        // create content for the test documents with just a few terms
-        Term[] terms = generateTerms(fieldName, numTerms);
-        StringBuilder sb = new StringBuilder();
-        for (int i = 0; i < terms.length; i++) {
-            sb.append(terms[i].text());
-            sb.append(" ");
-        }
-        String content = sb.toString();
+  // Tests whether the DocumentWriter and SegmentMerger correctly enable the
+  // payload bit in the FieldInfo
+  public void testPayloadFieldBit() throws Exception {
+    Directory ram = newDirectory();
+    PayloadAnalyzer analyzer = new PayloadAnalyzer();
+    IndexWriter writer = new IndexWriter(ram, newIndexWriterConfig(analyzer));
+    Document d = new Document();
+    // this field won't have any payloads
+    d.add(newTextField("f1", "This field has no payloads", Field.Store.NO));
+    // this field will have payloads in all docs, however not for all term positions,
+    // so this field is used to check if the DocumentWriter correctly enables the payloads bit
+    // even if only some term positions have payloads
+    d.add(newTextField("f2", "This field has payloads in all docs", Field.Store.NO));
+    d.add(newTextField("f2", "This field has payloads in all docs NO PAYLOAD", Field.Store.NO));
+    // this field is used to verify if the SegmentMerger enables payloads for a field if it has payloads 
+    // enabled in only some documents
+    d.add(newTextField("f3", "This field has payloads in some docs", Field.Store.NO));
+    // only add payload data for field f2
+    analyzer.setPayloadData("f2", "somedata".getBytes(StandardCharsets.UTF_8), 0, 1);
+    writer.addDocument(d);
+    // flush
+    writer.close();
+
+    SegmentReader reader = getOnlySegmentReader(DirectoryReader.open(ram));
+    FieldInfos fi = reader.getFieldInfos();
+    assertFalse("Payload field bit should not be set.", fi.fieldInfo("f1").hasPayloads());
+    assertTrue("Payload field bit should be set.", fi.fieldInfo("f2").hasPayloads());
+    assertFalse("Payload field bit should not be set.", fi.fieldInfo("f3").hasPayloads());
+    reader.close();
         
+    // now we add another document which has payloads for field f3 and verify if the SegmentMerger
+    // enabled payloads for that field
+    analyzer = new PayloadAnalyzer(); // Clear payload state for each field
+    writer = new IndexWriter(ram, newIndexWriterConfig(analyzer)
+                             .setOpenMode(OpenMode.CREATE));
+    d = new Document();
+    d.add(newTextField("f1", "This field has no payloads", Field.Store.NO));
+    d.add(newTextField("f2", "This field has payloads in all docs", Field.Store.NO));
+    d.add(newTextField("f2", "This field has payloads in all docs", Field.Store.NO));
+    d.add(newTextField("f3", "This field has payloads in some docs", Field.Store.NO));
+    // add payload data for field f2 and f3
+    analyzer.setPayloadData("f2", "somedata".getBytes(StandardCharsets.UTF_8), 0, 1);
+    analyzer.setPayloadData("f3", "somedata".getBytes(StandardCharsets.UTF_8), 0, 3);
+    writer.addDocument(d);
+
+    // force merge
+    writer.forceMerge(1);
+    // flush
+    writer.close();
+
+    reader = getOnlySegmentReader(DirectoryReader.open(ram));
+    fi = reader.getFieldInfos();
+    assertFalse("Payload field bit should not be set.", fi.fieldInfo("f1").hasPayloads());
+    assertTrue("Payload field bit should be set.", fi.fieldInfo("f2").hasPayloads());
+    assertTrue("Payload field bit should be set.", fi.fieldInfo("f3").hasPayloads());
+    reader.close();
+    ram.close();
+  }
+
+  // Tests if payloads are correctly stored and loaded using both RamDirectory and FSDirectory
+  public void testPayloadsEncoding() throws Exception {
+    Directory dir = newDirectory();
+    performTest(dir);
+    dir.close();
+  }
+    
+  // builds an index with payloads in the given Directory and performs
+  // different tests to verify the payload encoding
+  private void performTest(Directory dir) throws Exception {
+    PayloadAnalyzer analyzer = new PayloadAnalyzer();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(analyzer)
+                                         .setOpenMode(OpenMode.CREATE)
+                                         .setMergePolicy(newLogMergePolicy()));
+        
+    // should be in sync with value in TermInfosWriter
+    final int skipInterval = 16;
+        
+    final int numTerms = 5;
+    final String fieldName = "f1";
+        
+    int numDocs = skipInterval + 1; 
+    // create content for the test documents with just a few terms
+    Term[] terms = generateTerms(fieldName, numTerms);
+    StringBuilder sb = new StringBuilder();
+    for (int i = 0; i < terms.length; i++) {
+      sb.append(terms[i].text());
+      sb.append(" ");
+    }
+    String content = sb.toString();
+        
+        
+    int payloadDataLength = numTerms * numDocs * 2 + numTerms * numDocs * (numDocs - 1) / 2;
+    byte[] payloadData = generateRandomData(payloadDataLength);
+        
+    Document d = new Document();
+    d.add(newTextField(fieldName, content, Field.Store.NO));
+    // add the same document multiple times to have the same payload lengths for all
+    // occurrences within two consecutive skip intervals
+    int offset = 0;
+    for (int i = 0; i < 2 * numDocs; i++) {
+      analyzer.setPayloadData(fieldName, payloadData, offset, 1);
+      offset += numTerms;
+      writer.addDocument(d);
+    }
+        
+    // make sure we create more than one segment to test merging
+    writer.commit();
+        
+    // now we make sure to have different payload lengths at the next skip point
+    for (int i = 0; i < numDocs; i++) {
+      analyzer.setPayloadData(fieldName, payloadData, offset, i);
+      offset += i * numTerms;
+      writer.addDocument(d);
+    }
         
-        int payloadDataLength = numTerms * numDocs * 2 + numTerms * numDocs * (numDocs - 1) / 2;
-        byte[] payloadData = generateRandomData(payloadDataLength);
+    writer.forceMerge(1);
+    // flush
+    writer.close();
         
-        Document d = new Document();
-        d.add(newTextField(fieldName, content, Field.Store.NO));
-        // add the same document multiple times to have the same payload lengths for all
-        // occurrences within two consecutive skip intervals
-        int offset = 0;
-        for (int i = 0; i < 2 * numDocs; i++) {
-            analyzer = new PayloadAnalyzer(fieldName, payloadData, offset, 1);
-            offset += numTerms;
-            writer.addDocument(d, analyzer);
+        
+    /*
+     * Verify the index
+     * first we test if all payloads are stored correctly
+     */        
+    IndexReader reader = DirectoryReader.open(dir);
+
+    byte[] verifyPayloadData = new byte[payloadDataLength];
+    offset = 0;
+    DocsAndPositionsEnum[] tps = new DocsAndPositionsEnum[numTerms];
+    for (int i = 0; i < numTerms; i++) {
+      tps[i] = MultiFields.getTermPositionsEnum(reader,
+                                                MultiFields.getLiveDocs(reader),
+                                                terms[i].field(),
+                                                new BytesRef(terms[i].text()));
+    }
+        
+    while (tps[0].nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+      for (int i = 1; i < numTerms; i++) {
+        tps[i].nextDoc();
+      }
+      int freq = tps[0].freq();
+
+      for (int i = 0; i < freq; i++) {
+        for (int j = 0; j < numTerms; j++) {
+          tps[j].nextPosition();
+          BytesRef br = tps[j].getPayload();
+          if (br != null) {
+            System.arraycopy(br.bytes, br.offset, verifyPayloadData, offset, br.length);
+            offset += br.length;
+          }
         }
+      }
+    }
         
-        // make sure we create more than one segment to test merging
-        writer.commit();
+    assertByteArrayEquals(payloadData, verifyPayloadData);
         
-        // now we make sure to have different payload lengths next at the next skip point        
-        for (int i = 0; i < numDocs; i++) {
-            analyzer = new PayloadAnalyzer(fieldName, payloadData, offset, i);
-            offset += i * numTerms;
-            writer.addDocument(d, analyzer);
-        }
+    /*
+     *  test lazy skipping
+     */        
+    DocsAndPositionsEnum tp = MultiFields.getTermPositionsEnum(reader,
+                                                               MultiFields.getLiveDocs(reader),
+                                                               terms[0].field(),
+                                                               new BytesRef(terms[0].text()));
+    tp.nextDoc();
+    tp.nextPosition();
+    // NOTE: prior rev of this test was failing to first
+    // call next here:
+    tp.nextDoc();
+    // now we don't read this payload
+    tp.nextPosition();
+    BytesRef payload = tp.getPayload();
+    assertEquals("Wrong payload length.", 1, payload.length);
+    assertEquals(payload.bytes[payload.offset], payloadData[numTerms]);
+    tp.nextDoc();
+    tp.nextPosition();
+        
+    // we don't read this payload and skip to a different document
+    tp.advance(5);
+    tp.nextPosition();
+    payload = tp.getPayload();
+    assertEquals("Wrong payload length.", 1, payload.length);
+    assertEquals(payload.bytes[payload.offset], payloadData[5 * numTerms]);
+                
         
-        writer.forceMerge(1);
-        // flush
-        writer.close();
-        
-        
-        /*
-         * Verify the index
-         * first we test if all payloads are stored correctly
-         */        
-        IndexReader reader = DirectoryReader.open(dir);
-
-        byte[] verifyPayloadData = new byte[payloadDataLength];
-        offset = 0;
-        DocsAndPositionsEnum[] tps = new DocsAndPositionsEnum[numTerms];
-        for (int i = 0; i < numTerms; i++) {
-          tps[i] = MultiFields.getTermPositionsEnum(reader,
-                                                    MultiFields.getLiveDocs(reader),
-                                                    terms[i].field(),
-                                                    new BytesRef(terms[i].text()));
-        }
+    /*
+     * Test different lengths at skip points
+     */
+    tp = MultiFields.getTermPositionsEnum(reader,
+                                          MultiFields.getLiveDocs(reader),
+                                          terms[1].field(),
+                                          new BytesRef(terms[1].text()));
+    tp.nextDoc();
+    tp.nextPosition();
+    assertEquals("Wrong payload length.", 1, tp.getPayload().length);
+    tp.advance(skipInterval - 1);
+    tp.nextPosition();
+    assertEquals("Wrong payload length.", 1, tp.getPayload().length);
+    tp.advance(2 * skipInterval - 1);
+    tp.nextPosition();
+    assertEquals("Wrong payload length.", 1, tp.getPayload().length);
+    tp.advance(3 * skipInterval - 1);
+    tp.nextPosition();
+    assertEquals("Wrong payload length.", 3 * skipInterval - 2 * numDocs - 1, tp.getPayload().length);
         
-        while (tps[0].nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
-            for (int i = 1; i < numTerms; i++) {
-                tps[i].nextDoc();
-            }
-            int freq = tps[0].freq();
+    reader.close();
+        
+    // test long payload
+    analyzer = new PayloadAnalyzer();
+    writer = new IndexWriter(dir, newIndexWriterConfig(analyzer)
+                             .setOpenMode(OpenMode.CREATE));
+    String singleTerm = "lucene";
+        
+    d = new Document();
+    d.add(newTextField(fieldName, singleTerm, Field.Store.NO));
+    // add a payload whose length is greater than the buffer size of BufferedIndexOutput
+    payloadData = generateRandomData(2000);
+    analyzer.setPayloadData(fieldName, payloadData, 100, 1500);
+    writer.addDocument(d);
 
-            for (int i = 0; i < freq; i++) {
-                for (int j = 0; j < numTerms; j++) {
-                    tps[j].nextPosition();
-                    BytesRef br = tps[j].getPayload();
-                    if (br != null) {
-                      System.arraycopy(br.bytes, br.offset, verifyPayloadData, offset, br.length);
-                      offset += br.length;
-                    }
-                }
-            }
-        }
         
-        assertByteArrayEquals(payloadData, verifyPayloadData);
+    writer.forceMerge(1);
+    // flush
+    writer.close();
         
-        /*
-         *  test lazy skipping
-         */        
-        DocsAndPositionsEnum tp = MultiFields.getTermPositionsEnum(reader,
-                                                                   MultiFields.getLiveDocs(reader),
-                                                                   terms[0].field(),
-                                                                   new BytesRef(terms[0].text()));
-        tp.nextDoc();
-        tp.nextPosition();
-        // NOTE: prior rev of this test was failing to first
-        // call next here:
-        tp.nextDoc();
-        // now we don't read this payload
-        tp.nextPosition();
-        BytesRef payload = tp.getPayload();
-        assertEquals("Wrong payload length.", 1, payload.length);
-        assertEquals(payload.bytes[payload.offset], payloadData[numTerms]);
-        tp.nextDoc();
-        tp.nextPosition();
-        
-        // we don't read this payload and skip to a different document
-        tp.advance(5);
-        tp.nextPosition();
-        payload = tp.getPayload();
-        assertEquals("Wrong payload length.", 1, payload.length);
-        assertEquals(payload.bytes[payload.offset], payloadData[5 * numTerms]);
-                
+    reader = DirectoryReader.open(dir);
+    tp = MultiFields.getTermPositionsEnum(reader,
+                                          MultiFields.getLiveDocs(reader),
+                                          fieldName,
+                                          new BytesRef(singleTerm));
+    tp.nextDoc();
+    tp.nextPosition();
+        
+    BytesRef br = tp.getPayload();
+    verifyPayloadData = new byte[br.length];
+    byte[] portion = new byte[1500];
+    System.arraycopy(payloadData, 100, portion, 0, 1500);
         
-        /*
-         * Test different lengths at skip points
-         */
-        tp = MultiFields.getTermPositionsEnum(reader,
-                                              MultiFields.getLiveDocs(reader),
-                                              terms[1].field(),
-                                              new BytesRef(terms[1].text()));
-        tp.nextDoc();
-        tp.nextPosition();
-        assertEquals("Wrong payload length.", 1, tp.getPayload().length);
-        tp.advance(skipInterval - 1);
-        tp.nextPosition();
-        assertEquals("Wrong payload length.", 1, tp.getPayload().length);
-        tp.advance(2 * skipInterval - 1);
-        tp.nextPosition();
-        assertEquals("Wrong payload length.", 1, tp.getPayload().length);
-        tp.advance(3 * skipInterval - 1);
-        tp.nextPosition();
-        assertEquals("Wrong payload length.", 3 * skipInterval - 2 * numDocs - 1, tp.getPayload().length);
-        
-        reader.close();
-        
-        // test long payload
-        analyzer = new PayloadAnalyzer();
-        writer = new IndexWriter(dir, newIndexWriterConfig(analyzer)
-                                        .setOpenMode(OpenMode.CREATE));
-        String singleTerm = "lucene";
-        
-        d = new Document();
-        d.add(newTextField(fieldName, singleTerm, Field.Store.NO));
-        // add a payload whose length is greater than the buffer size of BufferedIndexOutput
-        payloadData = generateRandomData(2000);
-        analyzer.setPayloadData(fieldName, payloadData, 100, 1500);
-        writer.addDocument(d);
-
-        
-        writer.forceMerge(1);
-        // flush
-        writer.close();
-        
-        reader = DirectoryReader.open(dir);
-        tp = MultiFields.getTermPositionsEnum(reader,
-                                              MultiFields.getLiveDocs(reader),
-                                              fieldName,
-                                              new BytesRef(singleTerm));
-        tp.nextDoc();
-        tp.nextPosition();
-        
-        BytesRef br = tp.getPayload();
-        verifyPayloadData = new byte[br.length];
-        byte[] portion = new byte[1500];
-        System.arraycopy(payloadData, 100, portion, 0, 1500);
-        
-        assertByteArrayEquals(portion, br.bytes, br.offset, br.length);
-        reader.close();
-        
-    }
-    
-    static final Charset utf8 = StandardCharsets.UTF_8;
-    
-    private void generateRandomData(byte[] data) {
-      // this test needs the random data to be valid unicode
-      String s = TestUtil.randomFixedByteLengthUnicodeString(random(), data.length);
-      byte b[] = s.getBytes(utf8);
-      assert b.length == data.length;
-      System.arraycopy(b, 0, data, 0, b.length);
-    }
-
-    private byte[] generateRandomData(int n) {
-        byte[] data = new byte[n];
-        generateRandomData(data);
-        return data;
-    }
-    
-    private Term[] generateTerms(String fieldName, int n) {
-        int maxDigits = (int) (Math.log(n) / Math.log(10));
-        Term[] terms = new Term[n];
-        StringBuilder sb = new StringBuilder();
-        for (int i = 0; i < n; i++) {
-            sb.setLength(0);
-            sb.append("t");
-            int zeros = maxDigits - (int) (Math.log(i) / Math.log(10));
-            for (int j = 0; j < zeros; j++) {
-                sb.append("0");
-            }
-            sb.append(i);
-            terms[i] = new Term(fieldName, sb.toString());
-        }
-        return terms;
+    assertByteArrayEquals(portion, br.bytes, br.offset, br.length);
+    reader.close();
+        
+  }
+    
+  static final Charset utf8 = StandardCharsets.UTF_8;
+    
+  private void generateRandomData(byte[] data) {
+    // this test needs the random data to be valid unicode
+    String s = TestUtil.randomFixedByteLengthUnicodeString(random(), data.length);
+    byte b[] = s.getBytes(utf8);
+    assert b.length == data.length;
+    System.arraycopy(b, 0, data, 0, b.length);
+  }
+
+  private byte[] generateRandomData(int n) {
+    byte[] data = new byte[n];
+    generateRandomData(data);
+    return data;
+  }
+    
+  private Term[] generateTerms(String fieldName, int n) {
+    int maxDigits = (int) (Math.log(n) / Math.log(10));
+    Term[] terms = new Term[n];
+    StringBuilder sb = new StringBuilder();
+    for (int i = 0; i < n; i++) {
+      sb.setLength(0);
+      sb.append("t");
+      int zeros = maxDigits - (int) (Math.log(i) / Math.log(10));
+      for (int j = 0; j < zeros; j++) {
+        sb.append("0");
+      }
+      sb.append(i);
+      terms[i] = new Term(fieldName, sb.toString());
     }
+    return terms;
+  }
 
 
-    void assertByteArrayEquals(byte[] b1, byte[] b2) {
-        if (b1.length != b2.length) {
-          fail("Byte arrays have different lengths: " + b1.length + ", " + b2.length);
-        }
+  void assertByteArrayEquals(byte[] b1, byte[] b2) {
+    if (b1.length != b2.length) {
+      fail("Byte arrays have different lengths: " + b1.length + ", " + b2.length);
+    }
         
-        for (int i = 0; i < b1.length; i++) {
-          if (b1[i] != b2[i]) {
-            fail("Byte arrays different at index " + i + ": " + b1[i] + ", " + b2[i]);
-          }
-        }
-      }    
+    for (int i = 0; i < b1.length; i++) {
+      if (b1[i] != b2[i]) {
+        fail("Byte arrays different at index " + i + ": " + b1[i] + ", " + b2[i]);
+      }
+    }
+  }    
     
   void assertByteArrayEquals(byte[] b1, byte[] b2, int b2offset, int b2length) {
-        if (b1.length != b2length) {
-          fail("Byte arrays have different lengths: " + b1.length + ", " + b2length);
-        }
+    if (b1.length != b2length) {
+      fail("Byte arrays have different lengths: " + b1.length + ", " + b2length);
+    }
         
-        for (int i = 0; i < b1.length; i++) {
-          if (b1[i] != b2[b2offset+i]) {
-            fail("Byte arrays different at index " + i + ": " + b1[i] + ", " + b2[b2offset+i]);
-          }
-        }
-      }    
+    for (int i = 0; i < b1.length; i++) {
+      if (b1[i] != b2[b2offset+i]) {
+        fail("Byte arrays different at index " + i + ": " + b1[i] + ", " + b2[b2offset+i]);
+      }
+    }
+  }    
     
     
-    /**
-     * This Analyzer uses an WhitespaceTokenizer and PayloadFilter.
-     */
-    private static class PayloadAnalyzer extends Analyzer {
-        Map<String,PayloadData> fieldToData = new HashMap<>();
+  static class PayloadData {
+    byte[] data;
+    int offset;
+    int length;
+
+    PayloadData(byte[] data, int offset, int length) {
+      this.data = data;
+      this.offset = offset;
+      this.length = length;
+    }
+  }
 
-        public PayloadAnalyzer() {
-          super(PER_FIELD_REUSE_STRATEGY);
-        }
-        
-        public PayloadAnalyzer(String field, byte[] data, int offset, int length) {
-            super(PER_FIELD_REUSE_STRATEGY);
-            setPayloadData(field, data, offset, length);
-        }
+  /**
+   * This Analyzer uses a MockTokenizer and a PayloadFilter.
+   */
+  private static class PayloadAnalyzer extends Analyzer {
+    Map<String,PayloadData> fieldToData = new HashMap<>();
 
-        void setPayloadData(String field, byte[] data, int offset, int length) {
-            fieldToData.put(field, new PayloadData(data, offset, length));
-        }
+    public PayloadAnalyzer() {
+      super(PER_FIELD_REUSE_STRATEGY);
+    }
         
-        @Override
-        public TokenStreamComponents createComponents(String fieldName) {
-            PayloadData payload =  fieldToData.get(fieldName);
-            Tokenizer ts = new MockTokenizer(MockTokenizer.WHITESPACE, false);
-            TokenStream tokenStream = (payload != null) ?
-                new PayloadFilter(ts, payload.data, payload.offset, payload.length) : ts;
-            return new TokenStreamComponents(ts, tokenStream);
-        }
+    public PayloadAnalyzer(String field, byte[] data, int offset, int length) {
+      super(PER_FIELD_REUSE_STRATEGY);
+      setPayloadData(field, data, offset, length);
+    }
+
+    void setPayloadData(String field, byte[] data, int offset, int length) {
+      fieldToData.put(field, new PayloadData(data, offset, length));
+    }
         
-        private static class PayloadData {
-            byte[] data;
-            int offset;
-            int length;
-
-            PayloadData(byte[] data, int offset, int length) {
-                this.data = data;
-                this.offset = offset;
-                this.length = length;
-            }
-        }
+    @Override
+    public TokenStreamComponents createComponents(String fieldName) {
+      PayloadData payload = fieldToData.get(fieldName);
+      Tokenizer ts = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+      TokenStream tokenStream = (payload != null) ?
+        new PayloadFilter(ts, fieldName, fieldToData) : ts;
+      return new TokenStreamComponents(ts, tokenStream);
     }
+  }
 
     
-    /**
-     * This Filter adds payloads to the tokens.
-     */
-    private static class PayloadFilter extends TokenFilter {
-        private byte[] data;
-        private int length;
-        private int offset;
-        private int startOffset;
-        PayloadAttribute payloadAtt;
-        CharTermAttribute termAttribute;
-        
-        public PayloadFilter(TokenStream in, byte[] data, int offset, int length) {
-            super(in);
-            this.data = data;
-            this.length = length;
-            this.offset = offset;
-            this.startOffset = offset;
-            payloadAtt = addAttribute(PayloadAttribute.class);
-            termAttribute = addAttribute(CharTermAttribute.class);
-        }
-        
-        @Override
-        public boolean incrementToken() throws IOException {
-            boolean hasNext = input.incrementToken();
-            if (!hasNext) {
-              return false;
-            }
+  /**
+   * This Filter adds payloads to the tokens.
+   */
+  private static class PayloadFilter extends TokenFilter {
+    PayloadAttribute payloadAtt;
+    CharTermAttribute termAttribute;
+    private Map<String,PayloadData> fieldToData;
+    private String fieldName;
+    private PayloadData payloadData;
+    private int offset;
+        
+    public PayloadFilter(TokenStream in, String fieldName, Map<String,PayloadData> fieldToData) {
+      super(in);
+      this.fieldToData = fieldToData;
+      this.fieldName = fieldName;
+      payloadAtt = addAttribute(PayloadAttribute.class);
+      termAttribute = addAttribute(CharTermAttribute.class);
+    }
+        
+    @Override
+    public boolean incrementToken() throws IOException {
+      boolean hasNext = input.incrementToken();
+      if (!hasNext) {
+        return false;
+      }
 
-            // Some values of the same field are to have payloads and others not
-            if (offset + length <= data.length && !termAttribute.toString().endsWith("NO PAYLOAD")) {
-              BytesRef p = new BytesRef(data, offset, length);
-              payloadAtt.setPayload(p);
-              offset += length;
-            } else {
-              payloadAtt.setPayload(null);
-            }
+      // Some tokens of the same field are given payloads and others are not
+      if (offset + payloadData.length <= payloadData.data.length && !termAttribute.toString().endsWith("NO PAYLOAD")) {
+        BytesRef p = new BytesRef(payloadData.data, offset, payloadData.length);
+        payloadAtt.setPayload(p);
+        offset += payloadData.length;
+      } else {
+        payloadAtt.setPayload(null);
+      }
 
-            return true;
-        }
+      return true;
+    }
 
-      @Override
-      public void reset() throws IOException {
-        super.reset();
-        this.offset = startOffset;
-      }
+    @Override
+    public void reset() throws IOException {
+      super.reset();
+      this.payloadData = fieldToData.get(fieldName);
+      this.offset = payloadData.offset;
     }
+  }
     
-    public void testThreadSafety() throws Exception {
-        final int numThreads = 5;
-        final int numDocs = atLeast(50);
-        final ByteArrayPool pool = new ByteArrayPool(numThreads, 5);
-        
-        Directory dir = newDirectory();
-        final IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
-        final String field = "test";
-        
-        Thread[] ingesters = new Thread[numThreads];
-        for (int i = 0; i < numThreads; i++) {
-            ingesters[i] = new Thread() {
-                @Override
-                public void run() {
-                    try {
-                        for (int j = 0; j < numDocs; j++) {
-                            Document d = new Document();
-                            d.add(new TextField(field, new PoolingPayloadTokenStream(pool)));
-                            writer.addDocument(d);
-                        }
-                    } catch (Exception e) {
-                        e.printStackTrace();
-                        fail(e.toString());
-                    }
-                }
-            };
-            ingesters[i].start();
-        }
+  public void testThreadSafety() throws Exception {
+    final int numThreads = 5;
+    final int numDocs = atLeast(50);
+    final ByteArrayPool pool = new ByteArrayPool(numThreads, 5);
         
-        for (int i = 0; i < numThreads; i++) {
-          ingesters[i].join();
-        }
-        writer.close();
-        IndexReader reader = DirectoryReader.open(dir);
-        TermsEnum terms = MultiFields.getFields(reader).terms(field).iterator(null);
-        Bits liveDocs = MultiFields.getLiveDocs(reader);
-        DocsAndPositionsEnum tp = null;
-        while (terms.next() != null) {
-          String termText = terms.term().utf8ToString();
-          tp = terms.docsAndPositions(liveDocs, tp);
-          while(tp.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
-            int freq = tp.freq();
-            for (int i = 0; i < freq; i++) {
-              tp.nextPosition();
-              final BytesRef payload = tp.getPayload();
-              assertEquals(termText, payload.utf8ToString());
+    Directory dir = newDirectory();
+    final IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
+    final String field = "test";
+        
+    Thread[] ingesters = new Thread[numThreads];
+    for (int i = 0; i < numThreads; i++) {
+      ingesters[i] = new Thread() {
+          @Override
+          public void run() {
+            try {
+              for (int j = 0; j < numDocs; j++) {
+                Document d = new Document();
+                d.add(new TextField(field, new PoolingPayloadTokenStream(pool)));
+                writer.addDocument(d);
+              }
+            } catch (Exception e) {
+              e.printStackTrace();
+              fail(e.toString());
             }
           }
-        }
-        reader.close();
-        dir.close();
-        assertEquals(pool.size(), numThreads);
-    }
-    
-    private class PoolingPayloadTokenStream extends TokenStream {
-        private byte[] payload;
-        private boolean first;
-        private ByteArrayPool pool;
-        private String term;
-
-        CharTermAttribute termAtt;
-        PayloadAttribute payloadAtt;
-        
-        PoolingPayloadTokenStream(ByteArrayPool pool) {
-            this.pool = pool;
-            payload = pool.get();
-            generateRandomData(payload);
-            term = new String(payload, 0, payload.length, utf8);
-            first = true;
-            payloadAtt = addAttribute(PayloadAttribute.class);
-            termAtt = addAttribute(CharTermAttribute.class);
-        }
-        
-        @Override
-        public boolean incrementToken() throws IOException {
-            if (!first) return false;
-            first = false;
-            clearAttributes();
-            termAtt.append(term);
-            payloadAtt.setPayload(new BytesRef(payload));
-            return true;
-        }
+        };
+      ingesters[i].start();
+    }
         
-        @Override
-        public void close() throws IOException {
-            pool.release(payload);
+    for (int i = 0; i < numThreads; i++) {
+      ingesters[i].join();
+    }
+    writer.close();
+    IndexReader reader = DirectoryReader.open(dir);
+    TermsEnum terms = MultiFields.getFields(reader).terms(field).iterator(null);
+    Bits liveDocs = MultiFields.getLiveDocs(reader);
+    DocsAndPositionsEnum tp = null;
+    while (terms.next() != null) {
+      String termText = terms.term().utf8ToString();
+      tp = terms.docsAndPositions(liveDocs, tp);
+      while (tp.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+        int freq = tp.freq();
+        for (int i = 0; i < freq; i++) {
+          tp.nextPosition();
+          final BytesRef payload = tp.getPayload();
+          assertEquals(termText, payload.utf8ToString());
         }
-        
+      }
+    }
+    reader.close();
+    dir.close();
+    assertEquals(numThreads, pool.size());
+  }
+    
+  private class PoolingPayloadTokenStream extends TokenStream {
+    private byte[] payload;
+    private boolean first;
+    private ByteArrayPool pool;
+    private String term;
+
+    CharTermAttribute termAtt;
+    PayloadAttribute payloadAtt;
+        
+    PoolingPayloadTokenStream(ByteArrayPool pool) {
+      this.pool = pool;
+      payload = pool.get();
+      generateRandomData(payload);
+      term = new String(payload, 0, payload.length, utf8);
+      first = true;
+      payloadAtt = addAttribute(PayloadAttribute.class);
+      termAtt = addAttribute(CharTermAttribute.class);
+    }
+        
+    @Override
+    public boolean incrementToken() throws IOException {
+      if (!first) return false;
+      first = false;
+      clearAttributes();
+      termAtt.append(term);
+      payloadAtt.setPayload(new BytesRef(payload));
+      return true;
+    }
+        
+    @Override
+    public void close() throws IOException {
+      pool.release(payload);
     }
+        
+  }
     
-    private static class ByteArrayPool {
-        private List<byte[]> pool;
+  private static class ByteArrayPool {
+    private List<byte[]> pool;
         
-        ByteArrayPool(int capacity, int size) {
-            pool = new ArrayList<>();
-            for (int i = 0; i < capacity; i++) {
-                pool.add(new byte[size]);
-            }
-        }
+    ByteArrayPool(int capacity, int size) {
+      pool = new ArrayList<>();
+      for (int i = 0; i < capacity; i++) {
+        pool.add(new byte[size]);
+      }
+    }
     
-        synchronized byte[] get() {
-            return pool.remove(0);
-        }
+    synchronized byte[] get() {
+      return pool.remove(0);
+    }
         
-        synchronized void release(byte[] b) {
-            pool.add(b);
-        }
+    synchronized void release(byte[] b) {
+      pool.add(b);
+    }
         
-        synchronized int size() {
-            return pool.size();
-        }
+    synchronized int size() {
+      return pool.size();
     }
+  }
 
   public void testAcrossFields() throws Exception {
     Directory dir = newDirectory();
@@ -646,5 +645,4 @@ public class TestPayloads extends Lucene
     reader.close();
     dir.close();
   }
-  
 }

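A note on the reworked test analyzer above: with the per-document Analyzer
overloads removed, PayloadFilter now re-reads the shared fieldToData map in
reset(), which runs before each document is tokenized, so a single analyzer
instance can feed different payload bytes to successive documents. A minimal
driver sketch; PayloadAnalyzer, setPayloadData and generateRandomData are the
test members from the hunk above, while the writer setup is only illustrative:

    // Hypothetical driver, not part of this patch (assumes the usual
    // org.apache.lucene.document/index imports and the test's newIndexWriterConfig
    // helper): the analyzer is fixed at config time and its payload data is
    // swapped between addDocument calls.
    PayloadAnalyzer analyzer = new PayloadAnalyzer();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(analyzer));

    byte[] first = generateRandomData(50);
    analyzer.setPayloadData("f", first, 0, 10);   // read in PayloadFilter.reset()
    Document doc = new Document();
    doc.add(new TextField("f", "some payload bearing tokens", Field.Store.NO));
    writer.addDocument(doc);                      // no Analyzer argument anymore

    byte[] second = generateRandomData(50);
    analyzer.setPayloadData("f", second, 5, 10);  // same analyzer, new bytes
    writer.addDocument(doc);                      // reset() picks up the change
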
Modified: lucene/dev/branches/branch_5x/lucene/core/src/test/org/apache/lucene/search/TestControlledRealTimeReopenThread.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/core/src/test/org/apache/lucene/search/TestControlledRealTimeReopenThread.java?rev=1656273&r1=1656272&r2=1656273&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/core/src/test/org/apache/lucene/search/TestControlledRealTimeReopenThread.java (original)
+++ lucene/dev/branches/branch_5x/lucene/core/src/test/org/apache/lucene/search/TestControlledRealTimeReopenThread.java Sun Feb  1 09:27:34 2015
@@ -390,9 +390,9 @@ public class TestControlledRealTimeReope
 
     @Override
     public void updateDocument(Term term,
-        Iterable<? extends IndexableField> doc, Analyzer analyzer)
+        Iterable<? extends IndexableField> doc)
         throws IOException {
-      super.updateDocument(term, doc, analyzer);
+      super.updateDocument(term, doc);
       try {
         if (waitAfterUpdate) {
           signal.countDown();

Modified: lucene/dev/branches/branch_5x/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java?rev=1656273&r1=1656272&r2=1656273&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (original)
+++ lucene/dev/branches/branch_5x/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java Sun Feb  1 09:27:34 2015
@@ -2039,22 +2039,22 @@ public class HighlighterTest extends Bas
     Document doc = new Document();
     doc.add(new IntField(NUMERIC_FIELD_NAME, 1, Field.Store.NO));
     doc.add(new StoredField(NUMERIC_FIELD_NAME, 1));
-    writer.addDocument(doc, analyzer);
+    writer.addDocument(doc);
 
     doc = new Document();
     doc.add(new IntField(NUMERIC_FIELD_NAME, 3, Field.Store.NO));
     doc.add(new StoredField(NUMERIC_FIELD_NAME, 3));
-    writer.addDocument(doc, analyzer);
+    writer.addDocument(doc);
 
     doc = new Document();
     doc.add(new IntField(NUMERIC_FIELD_NAME, 5, Field.Store.NO));
     doc.add(new StoredField(NUMERIC_FIELD_NAME, 5));
-    writer.addDocument(doc, analyzer);
+    writer.addDocument(doc);
 
     doc = new Document();
     doc.add(new IntField(NUMERIC_FIELD_NAME, 7, Field.Store.NO));
     doc.add(new StoredField(NUMERIC_FIELD_NAME, 7));
-    writer.addDocument(doc, analyzer);
+    writer.addDocument(doc);
 
     Document childDoc = doc(FIELD_NAME, "child document");
     Document parentDoc = doc(FIELD_NAME, "parent document");

Modified: lucene/dev/branches/branch_5x/lucene/misc/src/java/org/apache/lucene/index/SortingMergePolicy.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/misc/src/java/org/apache/lucene/index/SortingMergePolicy.java?rev=1656273&r1=1656272&r2=1656273&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/misc/src/java/org/apache/lucene/index/SortingMergePolicy.java (original)
+++ lucene/dev/branches/branch_5x/lucene/misc/src/java/org/apache/lucene/index/SortingMergePolicy.java Sun Feb  1 09:27:34 2015
@@ -47,7 +47,7 @@ import org.apache.lucene.util.packed.Pac
  *  will be sorted while segments resulting from a flush will be in the order
  *  in which documents have been added.
  *  <p><b>NOTE</b>: Never use this policy if you rely on
- *  {@link IndexWriter#addDocuments(Iterable, Analyzer) IndexWriter.addDocuments}
+ *  {@link IndexWriter#addDocuments(Iterable) IndexWriter.addDocuments}
 *  to have sequentially-assigned doc IDs; this policy will scatter doc IDs.
  *  <p><b>NOTE</b>: This policy should only be used with idempotent {@code Sort}s 
  *  so that the order of segments is predictable. For example, using 

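The NOTE above concerns block indexing: a single addDocuments(Iterable) call
assigns the documents contiguous doc IDs within their segment, which block-join
queries depend on, and a merge policy that re-sorts documents by an arbitrary
Sort destroys that contiguity. A small illustration of such a block, using the
one-argument signature this commit leaves in place; the field names are made up:

    // Sketch of a parent/child block whose doc-ID contiguity a sorting merge
    // would scatter ('writer' is an open IndexWriter; assumes the usual
    // java.util and org.apache.lucene.document imports).
    List<Document> block = new ArrayList<>();
    for (int i = 0; i < 2; i++) {
      Document child = new Document();
      child.add(new StringField("type", "child", Field.Store.NO));
      block.add(child);
    }
    Document parent = new Document();
    parent.add(new StringField("type", "parent", Field.Store.NO));
    block.add(parent);              // the parent must come last in the block
    writer.addDocuments(block);     // children and parent get contiguous IDs
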
Modified: lucene/dev/branches/branch_5x/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java?rev=1656273&r1=1656272&r2=1656273&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java (original)
+++ lucene/dev/branches/branch_5x/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java Sun Feb  1 09:27:34 2015
@@ -104,11 +104,6 @@ public class RandomIndexWriter implement
    */
   public <T extends IndexableField> void addDocument(final Iterable<T> doc) throws IOException {
     LuceneTestCase.maybeChangeLiveIndexWriterConfig(r, w.getConfig());
-    addDocument(doc, w.getAnalyzer());
-  }
-
-  public <T extends IndexableField> void addDocument(final Iterable<T> doc, Analyzer a) throws IOException {
-    LuceneTestCase.maybeChangeLiveIndexWriterConfig(r, w.getConfig());
     if (r.nextInt(5) == 3) {
       // TODO: maybe, we should simply buffer up added docs
       // (but we need to clone them), and only when
@@ -141,9 +136,9 @@ public class RandomIndexWriter implement
             }
           };
         }
-        }, a);
+        });
     } else {
-      w.addDocument(doc, a);
+      w.addDocument(doc);
     }
     
     maybeCommit();

Modified: lucene/dev/branches/branch_5x/solr/core/src/java/org/apache/solr/core/SolrCore.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/solr/core/src/java/org/apache/solr/core/SolrCore.java?rev=1656273&r1=1656272&r2=1656273&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/solr/core/src/java/org/apache/solr/core/SolrCore.java (original)
+++ lucene/dev/branches/branch_5x/solr/core/src/java/org/apache/solr/core/SolrCore.java Sun Feb  1 09:27:34 2015
@@ -536,7 +536,7 @@ public final class SolrCore implements S
         log.warn(logid+"Solr index directory '" + new File(indexDir) + "' doesn't exist."
                 + " Creating new index...");
 
-        SolrIndexWriter writer = SolrIndexWriter.create("SolrCore.initIndex", indexDir, getDirectoryFactory(), true, 
+        SolrIndexWriter writer = SolrIndexWriter.create(this, "SolrCore.initIndex", indexDir, getDirectoryFactory(), true, 
                                                         getLatestSchema(), solrConfig.indexConfig, solrDelPolicy, codec);
         writer.close();
       }

Modified: lucene/dev/branches/branch_5x/solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java?rev=1656273&r1=1656272&r2=1656273&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java (original)
+++ lucene/dev/branches/branch_5x/solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java Sun Feb  1 09:27:34 2015
@@ -264,7 +264,7 @@ public final class DefaultSolrCoreState
   }
   
   protected SolrIndexWriter createMainIndexWriter(SolrCore core, String name) throws IOException {
-    return SolrIndexWriter.create(name, core.getNewIndexDir(),
+    return SolrIndexWriter.create(core, name, core.getNewIndexDir(),
         core.getDirectoryFactory(), false, core.getLatestSchema(),
         core.getSolrConfig().indexConfig, core.getDeletionPolicy(), core.getCodec());
   }

Modified: lucene/dev/branches/branch_5x/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java?rev=1656273&r1=1656272&r2=1656273&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java (original)
+++ lucene/dev/branches/branch_5x/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java Sun Feb  1 09:27:34 2015
@@ -235,11 +235,11 @@ public class DirectUpdateHandler2 extend
             }
 
             if (cmd.isBlock()) {
-              writer.updateDocuments(updateTerm, cmd, schema.getIndexAnalyzer());
+              writer.updateDocuments(updateTerm, cmd);
             } else {
               Document luceneDocument = cmd.getLuceneDocument();
               // SolrCore.verbose("updateDocument",updateTerm,luceneDocument,writer);
-              writer.updateDocument(updateTerm, luceneDocument, schema.getIndexAnalyzer());
+              writer.updateDocument(updateTerm, luceneDocument);
             }
             // SolrCore.verbose("updateDocument",updateTerm,"DONE");
             
@@ -264,9 +264,9 @@ public class DirectUpdateHandler2 extend
         } else {
           // allow duplicates
           if (cmd.isBlock()) {
-            writer.addDocuments(cmd, schema.getIndexAnalyzer());
+            writer.addDocuments(cmd);
           } else {
-            writer.addDocument(cmd.getLuceneDocument(), schema.getIndexAnalyzer());
+            writer.addDocument(cmd.getLuceneDocument());
           }
 
           if (ulog != null) ulog.add(cmd);
@@ -442,8 +442,7 @@ public class DirectUpdateHandler2 extend
       RefCounted<IndexWriter> iw = solrCoreState.getIndexWriter(core);
       try {
         IndexWriter writer = iw.get();
-        writer.updateDocument(idTerm, luceneDocument, cmd.getReq().getSchema()
-            .getIndexAnalyzer());
+        writer.updateDocument(idTerm, luceneDocument);
         
         for (Query q : dbqList) {
           writer.deleteDocuments(new DeleteByQueryWrapper(q, core.getLatestSchema()));

Modified: lucene/dev/branches/branch_5x/solr/core/src/java/org/apache/solr/update/SolrIndexConfig.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/solr/core/src/java/org/apache/solr/update/SolrIndexConfig.java?rev=1656273&r1=1656272&r2=1656273&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/solr/core/src/java/org/apache/solr/update/SolrIndexConfig.java (original)
+++ lucene/dev/branches/branch_5x/solr/core/src/java/org/apache/solr/update/SolrIndexConfig.java Sun Feb  1 09:27:34 2015
@@ -17,6 +17,14 @@
 
 package org.apache.solr.update;
 
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.PrintStream;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.DelegatingAnalyzerWrapper;
 import org.apache.lucene.index.*;
 import org.apache.lucene.index.IndexWriter.IndexReaderWarmer;
 import org.apache.lucene.util.InfoStream;
@@ -24,16 +32,14 @@ import org.apache.lucene.util.Version;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.core.MapSerializable;
-import org.apache.solr.core.SolrConfig;
 import org.apache.solr.core.PluginInfo;
+import org.apache.solr.core.SolrConfig;
+import org.apache.solr.core.SolrCore;
 import org.apache.solr.schema.IndexSchema;
 import org.apache.solr.util.SolrPluginUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.List;
-import java.util.Map;
-
 import static org.apache.solr.core.Config.assertWarnOrFail;
 
 /**
@@ -180,12 +186,23 @@ public class SolrIndexConfig implements
     return l.isEmpty() ? def : l.get(0);
   }
 
-  public IndexWriterConfig toIndexWriterConfig(IndexSchema schema) {
-    // so that we can update the analyzer on core reload, we pass null
-    // for the default analyzer, and explicitly pass an analyzer on 
-    // appropriate calls to IndexWriter
-    
-    IndexWriterConfig iwc = new IndexWriterConfig(null);
+  private static class DelayedSchemaAnalyzer extends DelegatingAnalyzerWrapper {
+    private final SolrCore core;
+
+    public DelayedSchemaAnalyzer(SolrCore core) {
+      super(PER_FIELD_REUSE_STRATEGY);
+      this.core = core;
+    }
+
+    @Override
+    protected Analyzer getWrappedAnalyzer(String fieldName) {
+      return core.getLatestSchema().getIndexAnalyzer();
+    }
+  }
+
+  public IndexWriterConfig toIndexWriterConfig(SolrCore core) {
+    IndexSchema schema = core.getLatestSchema();
+    IndexWriterConfig iwc = new IndexWriterConfig(new DelayedSchemaAnalyzer(core));
     if (maxBufferedDocs != -1)
       iwc.setMaxBufferedDocs(maxBufferedDocs);
 

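DelayedSchemaAnalyzer is the replacement for the old null-analyzer trick:
instead of passing an analyzer on each IndexWriter call, the IndexWriterConfig
now carries a DelegatingAnalyzerWrapper that resolves
core.getLatestSchema().getIndexAnalyzer() at analysis time, so a schema swap on
core reload takes effect without per-document plumbing. The same pattern in
isolation, as a sketch; ReloadableAnalyzer is an invented name, the Lucene
types are real:

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.DelegatingAnalyzerWrapper;

    // Stand-alone version of the delegation pattern used above: the wrapped
    // analyzer is looked up on every call, so swapping the delegate (e.g. on
    // a config reload) affects every document analyzed afterwards.
    final class ReloadableAnalyzer extends DelegatingAnalyzerWrapper {
      private volatile Analyzer current;       // swapped atomically on reload

      ReloadableAnalyzer(Analyzer initial) {
        super(PER_FIELD_REUSE_STRATEGY);       // reuse token streams per field
        this.current = initial;
      }

      void swap(Analyzer replacement) {        // called by the reload path
        this.current = replacement;
      }

      @Override
      protected Analyzer getWrappedAnalyzer(String fieldName) {
        return current;                        // always the latest delegate
      }
    }
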
Modified: lucene/dev/branches/branch_5x/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java?rev=1656273&r1=1656272&r2=1656273&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java (original)
+++ lucene/dev/branches/branch_5x/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java Sun Feb  1 09:27:34 2015
@@ -121,7 +121,7 @@ public class SolrIndexSplitter {
       } else {
         SolrCore core = searcher.getCore();
         String path = paths.get(partitionNumber);
-        iw = SolrIndexWriter.create("SplittingIndexWriter"+partitionNumber + (ranges != null ? " " + ranges.get(partitionNumber) : ""), path,
+        iw = SolrIndexWriter.create(core, "SplittingIndexWriter"+partitionNumber + (ranges != null ? " " + ranges.get(partitionNumber) : ""), path,
                                     core.getDirectoryFactory(), true, core.getLatestSchema(),
                                     core.getSolrConfig().indexConfig, core.getDeletionPolicy(), core.getCodec());
       }

Modified: lucene/dev/branches/branch_5x/solr/core/src/java/org/apache/solr/update/SolrIndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/solr/core/src/java/org/apache/solr/update/SolrIndexWriter.java?rev=1656273&r1=1656272&r2=1656273&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/solr/core/src/java/org/apache/solr/update/SolrIndexWriter.java (original)
+++ lucene/dev/branches/branch_5x/solr/core/src/java/org/apache/solr/update/SolrIndexWriter.java Sun Feb  1 09:27:34 2015
@@ -27,8 +27,9 @@ import org.apache.lucene.index.IndexWrit
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.InfoStream;
 import org.apache.solr.common.util.IOUtils;
-import org.apache.solr.core.DirectoryFactory;
 import org.apache.solr.core.DirectoryFactory.DirContext;
+import org.apache.solr.core.DirectoryFactory;
+import org.apache.solr.core.SolrCore;
 import org.apache.solr.schema.IndexSchema;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -56,12 +57,12 @@ public class SolrIndexWriter extends Ind
   private InfoStream infoStream;
   private Directory directory;
 
-  public static SolrIndexWriter create(String name, String path, DirectoryFactory directoryFactory, boolean create, IndexSchema schema, SolrIndexConfig config, IndexDeletionPolicy delPolicy, Codec codec) throws IOException {
+  public static SolrIndexWriter create(SolrCore core, String name, String path, DirectoryFactory directoryFactory, boolean create, IndexSchema schema, SolrIndexConfig config, IndexDeletionPolicy delPolicy, Codec codec) throws IOException {
 
     SolrIndexWriter w = null;
     final Directory d = directoryFactory.get(path, DirContext.DEFAULT, config.lockType);
     try {
-      w = new SolrIndexWriter(name, path, d, create, schema, 
+      w = new SolrIndexWriter(core, name, path, d, create, schema, 
                               config, delPolicy, codec);
       w.setDirectoryFactory(directoryFactory);
       return w;
@@ -73,9 +74,9 @@ public class SolrIndexWriter extends Ind
     }
   }
 
-  private SolrIndexWriter(String name, String path, Directory directory, boolean create, IndexSchema schema, SolrIndexConfig config, IndexDeletionPolicy delPolicy, Codec codec) throws IOException {
+  private SolrIndexWriter(SolrCore core, String name, String path, Directory directory, boolean create, IndexSchema schema, SolrIndexConfig config, IndexDeletionPolicy delPolicy, Codec codec) throws IOException {
     super(directory,
-          config.toIndexWriterConfig(schema).
+          config.toIndexWriterConfig(core).
           setOpenMode(create ? IndexWriterConfig.OpenMode.CREATE : IndexWriterConfig.OpenMode.APPEND).
           setIndexDeletionPolicy(delPolicy).setCodec(codec)
           );

Modified: lucene/dev/branches/branch_5x/solr/core/src/test/org/apache/solr/core/TestConfig.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/solr/core/src/test/org/apache/solr/core/TestConfig.java?rev=1656273&r1=1656272&r2=1656273&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/solr/core/src/test/org/apache/solr/core/TestConfig.java (original)
+++ lucene/dev/branches/branch_5x/solr/core/src/test/org/apache/solr/core/TestConfig.java Sun Feb  1 09:27:34 2015
@@ -116,7 +116,7 @@ public class TestConfig extends SolrTest
     assertEquals("default useCompoundFile", false, sic.getUseCompoundFile());
 
     IndexSchema indexSchema = IndexSchemaFactory.buildIndexSchema("schema.xml", solrConfig);
-    IndexWriterConfig iwc = sic.toIndexWriterConfig(indexSchema);
+    IndexWriterConfig iwc = sic.toIndexWriterConfig(h.getCore());
 
     assertNotNull("null mp", iwc.getMergePolicy());
     assertTrue("mp is not TMP", iwc.getMergePolicy() instanceof TieredMergePolicy);

Modified: lucene/dev/branches/branch_5x/solr/core/src/test/org/apache/solr/core/TestInfoStreamLogging.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_5x/solr/core/src/test/org/apache/solr/core/TestInfoStreamLogging.java?rev=1656273&r1=1656272&r2=1656273&view=diff
==============================================================================
--- lucene/dev/branches/branch_5x/solr/core/src/test/org/apache/solr/core/TestInfoStreamLogging.java (original)
+++ lucene/dev/branches/branch_5x/solr/core/src/test/org/apache/solr/core/TestInfoStreamLogging.java Sun Feb  1 09:27:34 2015
@@ -31,7 +31,7 @@ public class TestInfoStreamLogging exten
   }
   
   public void testIndexConfig() throws Exception {
-    IndexWriterConfig iwc = solrConfig.indexConfig.toIndexWriterConfig(h.getCore().getLatestSchema());
+    IndexWriterConfig iwc = solrConfig.indexConfig.toIndexWriterConfig(h.getCore());
 
     assertTrue(iwc.getInfoStream() instanceof LoggingInfoStream);
   }