Posted to commits@lucene.apache.org by mi...@apache.org on 2014/11/30 12:07:19 UTC

svn commit: r1642535 [5/19] - in /lucene/dev/branches/lucene6005/lucene: analysis/common/src/java/org/apache/lucene/collation/ analysis/common/src/test/org/apache/lucene/analysis/core/ analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/ ...

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestAbuseSchema.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestAbuseSchema.java?rev=1642535&r1=1642534&r2=1642535&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestAbuseSchema.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestAbuseSchema.java Sun Nov 30 11:07:09 2014
@@ -27,7 +27,7 @@ import org.apache.lucene.analysis.Analyz
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.document.Document2;
+import org.apache.lucene.document.Document;
 import org.apache.lucene.document.LowSchemaField;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.IndexSearcher;
@@ -46,16 +46,17 @@ public class TestAbuseSchema extends Luc
   // LUCENE-1010
   public void testNoTermVectorAfterTermVectorMerge() throws IOException {
     Directory dir = newDirectory();
-    IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig());
+    Analyzer a = new MockAnalyzer(random());
+    IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(a));
     List<LowSchemaField> document = new ArrayList<>();
-    LowSchemaField field = new LowSchemaField("tvtest", "a b c", IndexOptions.DOCS, false);
+    LowSchemaField field = new LowSchemaField(a, "tvtest", "a b c", IndexOptions.DOCS, false);
     field.enableTermVectors(false, false, false);
     document.add(field);
     iw.addDocument(document);
     iw.commit();
 
     document = new ArrayList<>();
-    document.add(new LowSchemaField("tvtest", "a b c", IndexOptions.DOCS, false));
+    document.add(new LowSchemaField(a, "tvtest", "a b c", IndexOptions.DOCS, false));
     iw.addDocument(document);
     // Make first segment
     iw.commit();
@@ -81,36 +82,41 @@ public class TestAbuseSchema extends Luc
     LowSchemaField f1, f2;
 
     // no vectors + vectors
-    f1 = new LowSchemaField("field", "value1", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
-    f2 = new LowSchemaField("field", "value2", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+    Analyzer a = new MockAnalyzer(random());
+    f1 = new LowSchemaField(a, "field", "value1", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+    f2 = new LowSchemaField(a, "field", "value2", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
     f2.enableTermVectors(false, false, false);
     doTestMixup(f1, f2);
     
     // vectors + vectors with pos
-    f1 = new LowSchemaField("field", "value1", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+    a = new MockAnalyzer(random());
+    f1 = new LowSchemaField(a, "field", "value1", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
     f1.enableTermVectors(false, false, false);
-    f2 = new LowSchemaField("field", "value2", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+    f2 = new LowSchemaField(a, "field", "value2", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
     f2.enableTermVectors(true, false, false);
     doTestMixup(f1, f2);
     
     // vectors + vectors with off
-    f1 = new LowSchemaField("field", "value1", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+    a = new MockAnalyzer(random());
+    f1 = new LowSchemaField(a, "field", "value1", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
     f1.enableTermVectors(false, false, false);
-    f2 = new LowSchemaField("field", "value2", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+    f2 = new LowSchemaField(a, "field", "value2", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
     f2.enableTermVectors(false, true, false);
     doTestMixup(f1, f2);
     
     // vectors with pos + vectors with pos + off
-    f1 = new LowSchemaField("field", "value1", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+    a = new MockAnalyzer(random());
+    f1 = new LowSchemaField(a, "field", "value1", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
     f1.enableTermVectors(true, false, false);
-    f2 = new LowSchemaField("field", "value2", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+    f2 = new LowSchemaField(a, "field", "value2", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
     f2.enableTermVectors(true, true, false);
     doTestMixup(f1, f2);
 
     // vectors with pos + vectors with pos + pay
-    f1 = new LowSchemaField("field", "value1", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+    a = new MockAnalyzer(random());
+    f1 = new LowSchemaField(a, "field", "value1", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
     f1.enableTermVectors(true, false, false);
-    f2 = new LowSchemaField("field", "value2", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+    f2 = new LowSchemaField(a, "field", "value2", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
     f2.enableTermVectors(true, false, true);
     doTestMixup(f1, f2);
   }
@@ -121,7 +127,7 @@ public class TestAbuseSchema extends Luc
     
     // add 3 good docs
     for (int i = 0; i < 3; i++) {
-      Document2 doc = iw.newDocument();
+      Document doc = iw.newDocument();
       doc.addAtom("id", Integer.toString(i));
       iw.addDocument(doc);
     }
@@ -153,12 +159,13 @@ public class TestAbuseSchema extends Luc
   public void testNoAbortOnBadTVSettings() throws Exception {
     Directory dir = newDirectory();
     // Don't use RandomIndexWriter because we want to be sure both docs go to 1 seg:
-    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    Analyzer a = new MockAnalyzer(random());
+    IndexWriterConfig iwc = new IndexWriterConfig(a);
     IndexWriter iw = new IndexWriter(dir, iwc);
 
     List<LowSchemaField> doc = new ArrayList<>();
     iw.addDocument(doc);
-    LowSchemaField field = new LowSchemaField("field", "value", IndexOptions.NONE, false);
+    LowSchemaField field = new LowSchemaField(a, "field", "value", IndexOptions.NONE, false);
     field.enableTermVectors(false, false, false);
     doc.add(field);
     try {
@@ -178,15 +185,16 @@ public class TestAbuseSchema extends Luc
 
   public void testPostingsOffsetsWithUnindexedFields() throws Exception {
     Directory dir = newDirectory();
-    RandomIndexWriter riw = newRandomIndexWriter(dir);
+    Analyzer a = new MockAnalyzer(random());
+    RandomIndexWriter riw = newRandomIndexWriter(dir, a);
     for (int i = 0; i < 100; i++) {
       // ensure at least one doc is indexed with offsets
       LowSchemaField field;
       if (i < 99 && random().nextInt(2) == 0) {
         // stored only
-        field = new LowSchemaField("foo", "boo!", IndexOptions.NONE, false);
+        field = new LowSchemaField(a, "foo", "boo!", IndexOptions.NONE, false);
       } else {
-        field = new LowSchemaField("foo", "boo!", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS, true);
+        field = new LowSchemaField(a, "foo", "boo!", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS, true);
         if (random().nextBoolean()) {
           // store some term vectors for the checkindex cross-check
           field.enableTermVectors(random().nextBoolean(), random().nextBoolean(), false);
@@ -211,22 +219,23 @@ public class TestAbuseSchema extends Luc
    * as the fully merged equivalent.
    */
   public void testOmitNormsCombos() throws IOException {
+    Analyzer a = new MockAnalyzer(random());
     // indexed with norms
-    LowSchemaField norms = new LowSchemaField("foo", "a", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+    LowSchemaField norms = new LowSchemaField(a, "foo", "a", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
 
     // indexed without norms
-    LowSchemaField noNorms = new LowSchemaField("foo", "a", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+    LowSchemaField noNorms = new LowSchemaField(a, "foo", "a", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
     noNorms.disableNorms();
 
     // not indexed, but stored
-    LowSchemaField noIndex = new LowSchemaField("foo", "a", IndexOptions.NONE, false);
+    LowSchemaField noIndex = new LowSchemaField(a, "foo", "a", IndexOptions.NONE, false);
 
     // not indexed but stored, omitNorms is set
-    LowSchemaField noNormsNoIndex = new LowSchemaField("foo", "a", IndexOptions.NONE, false);
+    LowSchemaField noNormsNoIndex = new LowSchemaField(a, "foo", "a", IndexOptions.NONE, false);
     noNormsNoIndex.disableNorms();
 
     // not indexed nor stored (doesn't exist at all, we index a different field instead)
-    LowSchemaField emptyNorms = new LowSchemaField("bar", "a", IndexOptions.NONE, false);
+    LowSchemaField emptyNorms = new LowSchemaField(a, "bar", "a", IndexOptions.NONE, false);
     
     assertNotNull(getNorms("foo", norms, norms));
     assertNull(getNorms("foo", norms, noNorms));
@@ -295,23 +304,24 @@ public class TestAbuseSchema extends Luc
     // globalFieldNumbers.docValuesType map if the field existed, resulting in
     // potentially adding the same field with different DV types.
     Directory dir = newDirectory();
-    IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
+    Analyzer a = new MockAnalyzer(random());
+    IndexWriterConfig conf = newIndexWriterConfig(a);
     IndexWriter writer = new IndexWriter(dir, conf);
     List<LowSchemaField> doc = new ArrayList<>();
 
-    LowSchemaField field = new LowSchemaField("f", "mock-value", IndexOptions.DOCS, false);
+    LowSchemaField field = new LowSchemaField(a, "f", "mock-value", IndexOptions.DOCS, false);
     field.disableNorms();
     field.doNotStore();
     doc.add(field);
 
-    field = new LowSchemaField("f", 5, IndexOptions.NONE, false);
+    field = new LowSchemaField(a, "f", 5, IndexOptions.NONE, false);
     field.setDocValuesType(DocValuesType.NUMERIC);
     doc.add(field);
     writer.addDocument(doc);
     writer.commit();
     
     doc = new ArrayList<>();
-    field = new LowSchemaField("f", new BytesRef("mock"), IndexOptions.NONE, false);
+    field = new LowSchemaField(a, "f", new BytesRef("mock"), IndexOptions.NONE, false);
     field.setDocValuesType(DocValuesType.BINARY);
     doc.add(field);
 
@@ -328,10 +338,11 @@ public class TestAbuseSchema extends Luc
   // LUCENE-6049
   public void testExcIndexingDocBeforeDocValues() throws Exception {
     Directory dir = newDirectory();
-    IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
+    Analyzer a = new MockAnalyzer(random());
+    IndexWriterConfig iwc = new IndexWriterConfig(a);
     IndexWriter w = new IndexWriter(dir, iwc);
     List<LowSchemaField> doc = new ArrayList<>();
-    LowSchemaField field = new LowSchemaField("test", "value", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+    LowSchemaField field = new LowSchemaField(a, "test", "value", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
     field.setDocValuesType(DocValuesType.SORTED);
     field.doNotStore();
     field.setTokenStream(new TokenStream() {
@@ -356,12 +367,13 @@ public class TestAbuseSchema extends Luc
   public void testSameFieldNumbersAcrossSegments() throws Exception {
     for (int i = 0; i < 2; i++) {
       Directory dir = newDirectory();
-      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
+      Analyzer a = new MockAnalyzer(random());
+      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(a)
                                                    .setMergePolicy(NoMergePolicy.INSTANCE));
 
       List<LowSchemaField> d1 = new ArrayList<>();
-      d1.add(new LowSchemaField("f1", "first field", IndexOptions.DOCS, false));
-      d1.add(new LowSchemaField("f2", "second field", IndexOptions.DOCS, false));
+      d1.add(new LowSchemaField(a, "f1", "first field", IndexOptions.DOCS, false));
+      d1.add(new LowSchemaField(a, "f2", "second field", IndexOptions.DOCS, false));
       writer.addDocument(d1);
 
       if (i == 1) {
@@ -373,12 +385,12 @@ public class TestAbuseSchema extends Luc
       }
 
       List<LowSchemaField> d2 = new ArrayList<>();
-      d2.add(new LowSchemaField("f2", "second field", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
-      LowSchemaField field = new LowSchemaField("f1", "first field", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+      d2.add(new LowSchemaField(a, "f2", "second field", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
+      LowSchemaField field = new LowSchemaField(a, "f1", "first field", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
       field.enableTermVectors(false, false, false);
       d2.add(field);
-      d2.add(new LowSchemaField("f3", "third field", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
-      d2.add(new LowSchemaField("f4", "fourth field", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
+      d2.add(new LowSchemaField(a, "f3", "third field", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
+      d2.add(new LowSchemaField(a, "f4", "fourth field", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
       writer.addDocument(d2);
 
       writer.close();
@@ -417,17 +429,18 @@ public class TestAbuseSchema extends Luc
 
   public void testEnablingNorms() throws IOException {
     Directory dir = newDirectory();
-    IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
+    Analyzer a = new MockAnalyzer(random());
+    IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(a)
                                           .setMaxBufferedDocs(10));
     // Enable norms for only 1 doc, pre flush
     for(int j=0;j<10;j++) {
       List<LowSchemaField> doc = new ArrayList<>();
       LowSchemaField f;
       if (j != 8) {
-        f = new LowSchemaField("field", "aaa", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+        f = new LowSchemaField(a, "field", "aaa", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
         f.disableNorms();
       } else {
-        f = new LowSchemaField("field", "aaa", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+        f = new LowSchemaField(a, "field", "aaa", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
         f.doNotStore();
       }
       doc.add(f);
@@ -443,17 +456,17 @@ public class TestAbuseSchema extends Luc
     assertEquals(10, hits.length);
     reader.close();
 
-    writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
+    writer = new IndexWriter(dir, newIndexWriterConfig(a)
                              .setOpenMode(IndexWriterConfig.OpenMode.CREATE).setMaxBufferedDocs(10));
     // Enable norms for only 1 doc, post flush
     for(int j=0;j<27;j++) {
       List<LowSchemaField> doc = new ArrayList<>();
       LowSchemaField f;
       if (j != 26) {
-        f = new LowSchemaField("field", "aaa", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+        f = new LowSchemaField(a, "field", "aaa", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
         f.disableNorms();
       } else {
-        f = new LowSchemaField("field", "aaa", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+        f = new LowSchemaField(a, "field", "aaa", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
         f.doNotStore();
       }
       doc.add(f);
@@ -478,7 +491,8 @@ public class TestAbuseSchema extends Luc
       if (VERBOSE) {
         System.out.println("TEST: iter=" + i);
       }
-      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
+      Analyzer a = new MockAnalyzer(random());
+      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(a)
                                                   .setMaxBufferedDocs(2)
                                                   .setMergePolicy(newLogMergePolicy()));
       //LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy();
@@ -489,22 +503,22 @@ public class TestAbuseSchema extends Luc
 
       if (i == 7) {
         // Add empty docs here
-        LowSchemaField field = new LowSchemaField("content3", "", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+        LowSchemaField field = new LowSchemaField(a, "content3", "", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
         field.doNotStore();
         doc.add(field);
       } else {
         if (i%2 == 0) {
-          doc.add(new LowSchemaField("content4", contents, IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
-          doc.add(new LowSchemaField("content5", "", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
+          doc.add(new LowSchemaField(a, "content4", contents, IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
+          doc.add(new LowSchemaField(a, "content5", "", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
         } else {
-          LowSchemaField field = new LowSchemaField("content5", "", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+          LowSchemaField field = new LowSchemaField(a, "content5", "", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
           field.doNotStore();
           doc.add(field);
         }
-        LowSchemaField field = new LowSchemaField("content1", contents, IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+        LowSchemaField field = new LowSchemaField(a, "content1", contents, IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
         field.doNotStore();
         doc.add(field);
-        doc.add(new LowSchemaField("content3", "", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
+        doc.add(new LowSchemaField(a, "content3", "", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
       }
 
       for(int j=0;j<4;j++) {
@@ -526,7 +540,8 @@ public class TestAbuseSchema extends Luc
 
   public void testIndexStoreCombos() throws Exception {
     Directory dir = newDirectory();
-    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
+    Analyzer a = new MockAnalyzer(random());
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(a));
     byte[] b = new byte[50];
     for(int i=0;i<50;i++) {
       b[i] = (byte) (i+77);
@@ -534,12 +549,12 @@ public class TestAbuseSchema extends Luc
 
     List<LowSchemaField> doc = new ArrayList<>();
 
-    LowSchemaField f = new LowSchemaField("binary", new BytesRef(b, 10, 17), IndexOptions.DOCS, true);
+    LowSchemaField f = new LowSchemaField(a, "binary", new BytesRef(b, 10, 17), IndexOptions.DOCS, true);
     final MockTokenizer doc1field1 = new MockTokenizer(MockTokenizer.WHITESPACE, false);
     doc1field1.setReader(new StringReader("doc1field1"));
     f.setTokenStream(doc1field1);
 
-    LowSchemaField f2 = new LowSchemaField("string", "value", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
+    LowSchemaField f2 = new LowSchemaField(a, "string", "value", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true);
     final MockTokenizer doc1field2 = new MockTokenizer(MockTokenizer.WHITESPACE, false);
     doc1field2.setReader(new StringReader("doc1field2"));
     f2.setTokenStream(doc1field2);
@@ -572,7 +587,7 @@ public class TestAbuseSchema extends Luc
     w.close();
 
     IndexReader ir = DirectoryReader.open(dir);
-    Document2 doc2 = ir.document(0);
+    Document doc2 = ir.document(0);
     IndexableField f3 = doc2.getField("binary");
     b = f3.binaryValue().bytes;
     assertTrue(b != null);
@@ -609,17 +624,17 @@ public class TestAbuseSchema extends Luc
     List<LowSchemaField> d = new ArrayList<>();
         
     // f1,f2,f3: docs only
-    d.add(new LowSchemaField("f1", "This field has docs only", IndexOptions.DOCS, true));
-    d.add(new LowSchemaField("f2", "This field has docs only", IndexOptions.DOCS, true));
-    d.add(new LowSchemaField("f3", "This field has docs only", IndexOptions.DOCS, true));
-
-    d.add(new LowSchemaField("f4", "This field has docs and freqs", IndexOptions.DOCS_AND_FREQS, true));
-    d.add(new LowSchemaField("f5", "This field has docs and freqs", IndexOptions.DOCS_AND_FREQS, true));
-    d.add(new LowSchemaField("f6", "This field has docs and freqs", IndexOptions.DOCS_AND_FREQS, true));
-    
-    d.add(new LowSchemaField("f7", "This field has docs and freqs and positions", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
-    d.add(new LowSchemaField("f8", "This field has docs and freqs and positions", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
-    d.add(new LowSchemaField("f9", "This field has docs and freqs and positions", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
+    d.add(new LowSchemaField(analyzer, "f1", "This field has docs only", IndexOptions.DOCS, true));
+    d.add(new LowSchemaField(analyzer, "f2", "This field has docs only", IndexOptions.DOCS, true));
+    d.add(new LowSchemaField(analyzer, "f3", "This field has docs only", IndexOptions.DOCS, true));
+
+    d.add(new LowSchemaField(analyzer, "f4", "This field has docs and freqs", IndexOptions.DOCS_AND_FREQS, true));
+    d.add(new LowSchemaField(analyzer, "f5", "This field has docs and freqs", IndexOptions.DOCS_AND_FREQS, true));
+    d.add(new LowSchemaField(analyzer, "f6", "This field has docs and freqs", IndexOptions.DOCS_AND_FREQS, true));
+    
+    d.add(new LowSchemaField(analyzer, "f7", "This field has docs and freqs and positions", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
+    d.add(new LowSchemaField(analyzer, "f8", "This field has docs and freqs and positions", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
+    d.add(new LowSchemaField(analyzer, "f9", "This field has docs and freqs and positions", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
         
     writer.addDocument(d);
     writer.forceMerge(1);
@@ -629,19 +644,19 @@ public class TestAbuseSchema extends Luc
     d = new ArrayList<>();
     
     // f1,f4,f7: docs only
-    d.add(new LowSchemaField("f1", "This field has docs only", IndexOptions.DOCS, true));
-    d.add(new LowSchemaField("f4", "This field has docs only", IndexOptions.DOCS, true));
-    d.add(new LowSchemaField("f7", "This field has docs only", IndexOptions.DOCS, true));
+    d.add(new LowSchemaField(analyzer, "f1", "This field has docs only", IndexOptions.DOCS, true));
+    d.add(new LowSchemaField(analyzer, "f4", "This field has docs only", IndexOptions.DOCS, true));
+    d.add(new LowSchemaField(analyzer, "f7", "This field has docs only", IndexOptions.DOCS, true));
 
     // f2, f5, f8: docs and freqs
-    d.add(new LowSchemaField("f2", "This field has docs and freqs", IndexOptions.DOCS_AND_FREQS, true));
-    d.add(new LowSchemaField("f5", "This field has docs and freqs", IndexOptions.DOCS_AND_FREQS, true));
-    d.add(new LowSchemaField("f8", "This field has docs and freqs", IndexOptions.DOCS_AND_FREQS, true));
+    d.add(new LowSchemaField(analyzer, "f2", "This field has docs and freqs", IndexOptions.DOCS_AND_FREQS, true));
+    d.add(new LowSchemaField(analyzer, "f5", "This field has docs and freqs", IndexOptions.DOCS_AND_FREQS, true));
+    d.add(new LowSchemaField(analyzer, "f8", "This field has docs and freqs", IndexOptions.DOCS_AND_FREQS, true));
     
     // f3, f6, f9: docs and freqs and positions
-    d.add(new LowSchemaField("f3", "This field has docs and freqs and positions", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
-    d.add(new LowSchemaField("f6", "This field has docs and freqs and positions", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
-    d.add(new LowSchemaField("f9", "This field has docs and freqs and positions", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
+    d.add(new LowSchemaField(analyzer, "f3", "This field has docs and freqs and positions", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
+    d.add(new LowSchemaField(analyzer, "f6", "This field has docs and freqs and positions", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
+    d.add(new LowSchemaField(analyzer, "f9", "This field has docs and freqs and positions", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
     writer.addDocument(d);
 
     // force merge
@@ -690,7 +705,7 @@ public class TestAbuseSchema extends Luc
     lmp.setNoCFSRatio(0.0);
 
     List<LowSchemaField> d = new ArrayList<>();
-    d.add(new LowSchemaField("f1", "This field has term freqs", IndexOptions.DOCS_AND_FREQS, true));
+    d.add(new LowSchemaField(analyzer, "f1", "This field has term freqs", IndexOptions.DOCS_AND_FREQS, true));
     for(int i=0;i<30;i++) {
       writer.addDocument(d);
     }
@@ -701,7 +716,7 @@ public class TestAbuseSchema extends Luc
     
     // now add some documents with positions, and check there is no prox after optimization
     d = new ArrayList<>();
-    d.add(new LowSchemaField("f1", "This field has term freqs", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
+    d.add(new LowSchemaField(analyzer, "f1", "This field has term freqs", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
     
     for(int i=0;i<30;i++) {
       writer.addDocument(d);
@@ -727,17 +742,19 @@ public class TestAbuseSchema extends Luc
   /** make sure we downgrade positions and payloads correctly */
   public void testMixing() throws Exception {
     Directory dir = newDirectory();
-    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
+    Analyzer a = new MockAnalyzer(random());
+
+    RandomIndexWriter iw = newRandomIndexWriter(dir, a);
     
     for (int i = 0; i < 20; i++) {
       List<LowSchemaField> doc = new ArrayList<>();
       if (i < 19 && random().nextBoolean()) {
         for (int j = 0; j < 50; j++) {
-          doc.add(new LowSchemaField("foo", "i have positions", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
+          doc.add(new LowSchemaField(a, "foo", "i have positions", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, true));
         }
       } else {
         for (int j = 0; j < 50; j++) {
-          doc.add(new LowSchemaField("foo", "i have no positions", IndexOptions.DOCS_AND_FREQS, true));
+          doc.add(new LowSchemaField(a, "foo", "i have no positions", IndexOptions.DOCS_AND_FREQS, true));
         }
       }
       iw.addDocument(doc);
@@ -759,9 +776,10 @@ public class TestAbuseSchema extends Luc
 
   public void testTypeChangeViaAddIndexesIR2() throws Exception {
     Directory dir = newDirectory();
-    IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
+    Analyzer a = new MockAnalyzer(random());
+    IndexWriterConfig conf = newIndexWriterConfig(a);
     IndexWriter writer = new IndexWriter(dir, conf);
-    LowSchemaField field = new LowSchemaField("dv", 0L, IndexOptions.NONE, false);
+    LowSchemaField field = new LowSchemaField(a, "dv", 0L, IndexOptions.NONE, false);
     field.setDocValuesType(DocValuesType.NUMERIC);
     List<LowSchemaField> doc = new ArrayList<>();
     doc.add(field);
@@ -774,7 +792,7 @@ public class TestAbuseSchema extends Luc
     IndexReader[] readers = new IndexReader[] {DirectoryReader.open(dir)};
     writer.addIndexes(readers);
     readers[0].close();
-    field = new LowSchemaField("dv", new BytesRef("foo"), IndexOptions.NONE, false);
+    field = new LowSchemaField(a, "dv", new BytesRef("foo"), IndexOptions.NONE, false);
     field.setDocValuesType(DocValuesType.BINARY);
     doc = new ArrayList<>();
     doc.add(field);

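Taken together, the hunks above show the shape of the lucene6005 API change in this file: LowSchemaField now takes the Analyzer as its first constructor argument, so one analyzer instance is created up front and shared between the IndexWriterConfig and every field. The following is a minimal sketch of the converted pattern, assembled only from calls visible in the diff (random(), newDirectory() and newIndexWriterConfig() are LuceneTestCase helpers; the LowSchemaField signature exists only on this branch, so treat it as assumed):

    // imports as at the top of TestAbuseSchema.java above
    Analyzer a = new MockAnalyzer(random());
    IndexWriter iw = new IndexWriter(newDirectory(), newIndexWriterConfig(a));

    // A "low schema" document is just a list of fields; the analyzer is
    // threaded through each field instead of being implicit in the writer.
    List<LowSchemaField> doc = new ArrayList<>();
    LowSchemaField field = new LowSchemaField(a, "tvtest", "a b c", IndexOptions.DOCS, false);
    field.enableTermVectors(false, false, false);   // vectors without positions/offsets
    doc.add(field);
    iw.addDocument(doc);
    iw.close();
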
Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java?rev=1642535&r1=1642534&r2=1642535&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java Sun Nov 30 11:07:09 2014
@@ -29,13 +29,8 @@ import org.apache.lucene.codecs.FilterCo
 import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.codecs.asserting.AssertingCodec;
 import org.apache.lucene.codecs.memory.MemoryPostingsFormat;
-import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.FieldTypes;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.PhraseQuery;
@@ -168,7 +163,7 @@ public class TestAddIndexes extends Luce
     // Adds 10 docs, then replaces them with another 10
     // docs, so 10 pending deletes:
     for (int i = 0; i < 20; i++) {
-      Document2 doc = writer.newDocument();
+      Document doc = writer.newDocument();
       doc.addAtom("id", "" + (i % 10));
       doc.addLargeText("content", "bbb " + i);
       writer.updateDocument(new Term("id", "" + (i%10)), doc);
@@ -203,7 +198,7 @@ public class TestAddIndexes extends Luce
     // Adds 10 docs, then replaces them with another 10
     // docs, so 10 pending deletes:
     for (int i = 0; i < 20; i++) {
-      Document2 doc = writer.newDocument();
+      Document doc = writer.newDocument();
       doc.addAtom("id", "" + (i % 10));
       doc.addLargeText("content", "bbb " + i);
       writer.updateDocument(new Term("id", "" + (i%10)), doc);
@@ -241,7 +236,7 @@ public class TestAddIndexes extends Luce
     // Adds 10 docs, then replaces them with another 10
     // docs, so 10 pending deletes:
     for (int i = 0; i < 20; i++) {
-      Document2 doc = writer.newDocument();
+      Document doc = writer.newDocument();
       doc.addAtom("id", "" + (i % 10));
       doc.addLargeText("content", "bbb " + i);
       writer.updateDocument(new Term("id", "" + (i%10)), doc);
@@ -516,7 +511,7 @@ public class TestAddIndexes extends Luce
 
   private void addDocs(IndexWriter writer, int numDocs) throws IOException {
     for (int i = 0; i < numDocs; i++) {
-      Document2 doc = writer.newDocument();
+      Document doc = writer.newDocument();
       doc.addLargeText("content", "aaa");
       writer.addDocument(doc);
     }
@@ -524,7 +519,7 @@ public class TestAddIndexes extends Luce
 
   private void addDocs2(IndexWriter writer, int numDocs) throws IOException {
     for (int i = 0; i < numDocs; i++) {
-      Document2 doc = writer.newDocument();
+      Document doc = writer.newDocument();
       doc.addLargeText("content", "bbb");
       writer.addDocument(doc);
     }
@@ -605,7 +600,7 @@ public class TestAddIndexes extends Luce
         .setMaxBufferedDocs(5).setMergePolicy(lmp));
 
     FieldTypes fieldTypes = writer.getFieldTypes();
-    Document2 doc = writer.newDocument();
+    Document doc = writer.newDocument();
     fieldTypes.enableTermVectors("content");
     fieldTypes.enableTermVectorPositions("content");
     fieldTypes.enableTermVectorOffsets("content");
@@ -642,7 +637,7 @@ public class TestAddIndexes extends Luce
   // TODO: these are also in TestIndexWriter... add a simple doc-writing method
   // like this to LuceneTestCase?
   private void addDoc(IndexWriter writer) throws IOException {
-    Document2 doc = writer.newDocument();
+    Document doc = writer.newDocument();
     doc.addLargeText("content", "aaa");
     writer.addDocument(doc);
   }
@@ -973,7 +968,7 @@ public class TestAddIndexes extends Luce
       dirs[i] = newDirectory();
       IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
       IndexWriter writer = new IndexWriter(dirs[i], conf);
-      Document2 doc = writer.newDocument();
+      Document doc = writer.newDocument();
       doc.addAtom("id", "myid");
       writer.addDocument(doc);
       writer.close();
@@ -1003,7 +998,7 @@ public class TestAddIndexes extends Luce
   // just like addDocs but with ID, starting from docStart
   private void addDocsWithID(IndexWriter writer, int numDocs, int docStart) throws IOException {
     for (int i = 0; i < numDocs; i++) {
-      Document2 doc = writer.newDocument();
+      Document doc = writer.newDocument();
       doc.addLargeText("content", "aaa");
       doc.addLargeText("id", "" + (docStart + i));
       writer.addDocument(doc);
@@ -1095,7 +1090,7 @@ public class TestAddIndexes extends Luce
     for (int i = 0; i < dirs.length; i++) {
       dirs[i] = new RAMDirectory();
       IndexWriter w = new IndexWriter(dirs[i], new IndexWriterConfig(new MockAnalyzer(random())));
-      Document2 d = w.newDocument();
+      Document d = w.newDocument();
       FieldTypes fieldTypes = w.getFieldTypes();
       fieldTypes.enableTermVectors("c");
       d.addLargeText("c", "v");
@@ -1141,7 +1136,7 @@ public class TestAddIndexes extends Luce
       IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
       conf.setCodec(new UnRegisteredCodec());
       IndexWriter w = new IndexWriter(toAdd, conf);
-      Document2 doc = w.newDocument();
+      Document doc = w.newDocument();
       doc.addLargeText("foo", "bar");
       w.addDocument(doc);
       w.close();
@@ -1178,7 +1173,7 @@ public class TestAddIndexes extends Luce
   public void testFieldNamesChanged() throws IOException {
     Directory d1 = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), d1);
-    Document2 doc = w.newDocument();
+    Document doc = w.newDocument();
     doc.addAtom("f1", "doc1 field1");
     doc.addAtom("id", "1");
     w.addDocument(doc);
@@ -1206,7 +1201,7 @@ public class TestAddIndexes extends Luce
     w.close();
     assertEquals(2, r3.numDocs());
     for(int docID=0;docID<2;docID++) {
-      Document2 d = r3.document(docID);
+      Document d = r3.document(docID);
       if (d.getString("id").equals("1")) {
         assertEquals("doc1 field1", d.getString("f1"));
       } else {

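Outside of TestAbuseSchema, most of this commit is the mechanical Document2 -> Document rename plus removal of the now-unused field imports (Field, FieldType, StringField, TextField and friends). On this branch, documents are obtained from the writer rather than constructed directly; a hedged sketch of the idiom, using only methods that appear in the hunks above (addAtom and addLargeText are branch-specific typed setters):

    // The writer hands out schema-aware Document instances; there is no
    // 'new Document()' in the converted tests.
    Document doc = writer.newDocument();
    doc.addAtom("id", "1");               // single-token field, not analyzed
    doc.addLargeText("content", "bbb 1"); // analyzed full-text field
    writer.updateDocument(new Term("id", "1"), doc);
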
Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestAllFilesHaveChecksumFooter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestAllFilesHaveChecksumFooter.java?rev=1642535&r1=1642534&r2=1642535&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestAllFilesHaveChecksumFooter.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestAllFilesHaveChecksumFooter.java Sun Nov 30 11:07:09 2014
@@ -21,10 +21,7 @@ import java.io.IOException;
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.CodecUtil;
-import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.util.LuceneTestCase;
@@ -40,7 +37,7 @@ public class TestAllFilesHaveChecksumFoo
     conf.setCodec(TestUtil.getDefaultCodec());
     RandomIndexWriter riw = new RandomIndexWriter(random(), dir, conf);
     for (int i = 0; i < 100; i++) {
-      Document2 doc = riw.newDocument();
+      Document doc = riw.newDocument();
       doc.addUniqueInt("id", i);
       // these fields should sometimes get term vectors, etc
       doc.addLargeText("body", TestUtil.randomUnicodeString(random()));

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestAllFilesHaveCodecHeader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestAllFilesHaveCodecHeader.java?rev=1642535&r1=1642534&r2=1642535&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestAllFilesHaveCodecHeader.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestAllFilesHaveCodecHeader.java Sun Nov 30 11:07:09 2014
@@ -23,13 +23,8 @@ import java.util.Map;
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.CodecUtil;
-import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.FieldTypes;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.util.LuceneTestCase;
@@ -50,7 +45,7 @@ public class TestAllFilesHaveCodecHeader
     fieldTypes.enableTermVectorPositions("vectors");
 
     for (int i = 0; i < 100; i++) {
-      Document2 doc = riw.newDocument();
+      Document doc = riw.newDocument();
       doc.addInt("id", i);
       doc.addLargeText("body", TestUtil.randomUnicodeString(random()));
       doc.addLargeText("vectors", TestUtil.randomUnicodeString(random()));

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestAtomicUpdate.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestAtomicUpdate.java?rev=1642535&r1=1642534&r2=1642535&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestAtomicUpdate.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestAtomicUpdate.java Sun Nov 30 11:07:09 2014
@@ -79,7 +79,7 @@ public class TestAtomicUpdate extends Lu
 
       // Update all 100 docs...
       for(int i=0; i<100; i++) {
-        Document2 d = writer.newDocument();
+        Document d = writer.newDocument();
         d.addUniqueInt("id", i);
         d.addLargeText("contents", English.intToEnglish(i+10*count));
         writer.updateDocument(fieldTypes.newIntTerm("id", i), d);
@@ -118,7 +118,7 @@ public class TestAtomicUpdate extends Lu
 
     // Establish a base index of 100 docs:
     for(int i=0;i<100;i++) {
-      Document2 d = writer.newDocument();
+      Document d = writer.newDocument();
       d.addUniqueInt("id", i);
       d.addLargeText("contents", English.intToEnglish(i));
       if ((i-1)%7 == 0) {

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestBagOfPositions.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestBagOfPositions.java?rev=1642535&r1=1642534&r2=1642535&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestBagOfPositions.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestBagOfPositions.java Sun Nov 30 11:07:09 2014
@@ -25,10 +25,7 @@ import java.util.concurrent.ConcurrentLi
 import java.util.concurrent.CountDownLatch;
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
@@ -80,7 +77,6 @@ public class TestBagOfPositions extends 
     }
 
     FieldTypes fieldTypes = iw.getFieldTypes();
-    Field prototype = newTextField("field", "", Field.Store.NO);
     if (random().nextBoolean()) {
       fieldTypes.disableNorms("field");
     }
@@ -118,7 +114,7 @@ public class TestBagOfPositions extends 
                   text.append(' ');
                   text.append(token);
                 }
-                Document2 document = iw.newDocument();
+                Document document = iw.newDocument();
                 document.addLargeText("field", text.toString());
                 iw.addDocument(document);
               }

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestBagOfPostings.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestBagOfPostings.java?rev=1642535&r1=1642534&r2=1642535&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestBagOfPostings.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestBagOfPostings.java Sun Nov 30 11:07:09 2014
@@ -26,9 +26,7 @@ import java.util.concurrent.ConcurrentLi
 import java.util.concurrent.CountDownLatch;
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
@@ -105,7 +103,7 @@ public class TestBagOfPostings extends L
                   text.append(token);
                   visited.add(token);
                 }
-                Document2 document = iw.newDocument();
+                Document document = iw.newDocument();
                 document.addLargeText("field", text.toString());
                 iw.addDocument(document);
               }

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestBinaryDocValuesUpdates.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestBinaryDocValuesUpdates.java?rev=1642535&r1=1642534&r2=1642535&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestBinaryDocValuesUpdates.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestBinaryDocValuesUpdates.java Sun Nov 30 11:07:09 2014
@@ -12,15 +12,8 @@ import org.apache.lucene.analysis.MockTo
 import org.apache.lucene.codecs.DocValuesFormat;
 import org.apache.lucene.codecs.asserting.AssertingCodec;
 import org.apache.lucene.codecs.asserting.AssertingDocValuesFormat;
-import org.apache.lucene.document.BinaryDocValuesField;
-import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Store;
 import org.apache.lucene.document.FieldTypes;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.SortedSetDocValuesField;
-import org.apache.lucene.document.StringField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.store.NRTCachingDirectory;
@@ -28,7 +21,6 @@ import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.LuceneTestCase.Nightly;
 import org.apache.lucene.util.TestUtil;
 import org.junit.Ignore;
 import org.junit.Test;
@@ -78,8 +70,8 @@ public class TestBinaryDocValuesUpdates 
     return bytes;
   }
   
-  private Document2 doc(IndexWriter w, int id) {
-    Document2 doc = w.newDocument();
+  private Document doc(IndexWriter w, int id) {
+    Document doc = w.newDocument();
     doc.addAtom("id", "doc-" + id);
     doc.addBinary("val", toBytes(id + 1));
     return doc;
@@ -366,7 +358,7 @@ public class TestBinaryDocValuesUpdates 
     fieldTypes.disableSorting("bdv");
 
     for (int i = 0; i < 4; i++) {
-      Document2 doc = writer.newDocument();
+      Document doc = writer.newDocument();
       doc.addAtom("dvUpdateKey", "dv");
       doc.addInt("ndv", i);
       doc.addBinary("bdv", new BytesRef(Integer.toString(i)));
@@ -418,7 +410,7 @@ public class TestBinaryDocValuesUpdates 
     fieldTypes.disableSorting("bdv2");
     
     for (int i = 0; i < 2; i++) {
-      Document2 doc = writer.newDocument();
+      Document doc = writer.newDocument();
       doc.addAtom("dvUpdateKey", "dv");
       doc.addBinary("bdv1", toBytes(i));
       doc.addBinary("bdv2", toBytes(i));
@@ -452,7 +444,7 @@ public class TestBinaryDocValuesUpdates 
     fieldTypes.disableSorting("bdv");
 
     for (int i = 0; i < 2; i++) {
-      Document2 doc = writer.newDocument();
+      Document doc = writer.newDocument();
       doc.addAtom("dvUpdateKey", "dv");
       if (i == 0) { // index only one document with value
         doc.addBinary("bdv", toBytes(5L));
@@ -483,7 +475,7 @@ public class TestBinaryDocValuesUpdates 
     IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
     IndexWriter writer = new IndexWriter(dir, conf);
     
-    Document2 doc = writer.newDocument();
+    Document doc = writer.newDocument();
     doc.addAtom("key", "doc");
     doc.addAtom("foo", "bar");
     writer.addDocument(doc); // flushed document
@@ -523,7 +515,7 @@ public class TestBinaryDocValuesUpdates 
     FieldTypes fieldTypes = writer.getFieldTypes();
     fieldTypes.disableSorting("bdv");
     
-    Document2 doc = writer.newDocument();
+    Document doc = writer.newDocument();
     doc.addAtom("key", "doc");
     doc.addBinary("bdv", toBytes(5L));
     doc.addShortText("sorted", "value");
@@ -556,7 +548,7 @@ public class TestBinaryDocValuesUpdates 
     FieldTypes fieldTypes = writer.getFieldTypes();
     fieldTypes.disableSorting("bdv");
     
-    Document2 doc = writer.newDocument();
+    Document doc = writer.newDocument();
     doc.addAtom("key", "doc");
     doc.addBinary("bdv", toBytes(5L));
     writer.addDocument(doc); // flushed document
@@ -590,7 +582,7 @@ public class TestBinaryDocValuesUpdates 
     for (int rnd = 0; rnd < numRounds; rnd++) {
       int numDocs = atLeast(30);
       for (int i = 0; i < numDocs; i++) {
-        Document2 doc = writer.newDocument();
+        Document doc = writer.newDocument();
         doc.addAtom("key", "doc");
         doc.addBinary("bdv", toBytes(-1));
         doc.addUniqueInt("id", docid++);
@@ -619,7 +611,7 @@ public class TestBinaryDocValuesUpdates 
       // forceMerge is called, the index will be with one segment and deletes
       // and some MPs might now merge it, thereby invalidating test's
       // assumption that the reader has no deletes).
-      Document2 doc = writer.newDocument();
+      Document doc = writer.newDocument();
       doc.addUniqueInt("id", docid++);
       doc.addAtom("key", "doc");
       doc.addBinary("bdv", toBytes(value));
@@ -657,7 +649,7 @@ public class TestBinaryDocValuesUpdates 
     FieldTypes fieldTypes = writer.getFieldTypes();
     fieldTypes.disableSorting("bdv");
     
-    Document2 doc = writer.newDocument();
+    Document doc = writer.newDocument();
     doc.addAtom("k1", "v1");
     doc.addAtom("k2", "v2");
     doc.addBinary("bdv", toBytes(5L));
@@ -711,7 +703,7 @@ public class TestBinaryDocValuesUpdates 
       int numDocs = atLeast(5);
 //      System.out.println("[" + Thread.currentThread().getName() + "]: round=" + i + ", numDocs=" + numDocs);
       for (int j = 0; j < numDocs; j++) {
-        Document2 doc = writer.newDocument();
+        Document doc = writer.newDocument();
         doc.addAtom("id", "doc-" + docID);
         doc.addAtom("key", "all"); // update key
         // add all fields with their current value
@@ -785,7 +777,7 @@ public class TestBinaryDocValuesUpdates 
     fieldTypes.disableSorting("bdv");
 
     // first segment with BDV
-    Document2 doc = writer.newDocument();
+    Document doc = writer.newDocument();
     doc.addAtom("id", "doc0");
     doc.addBinary("bdv", toBytes(3L));
     writer.addDocument(doc);
@@ -843,7 +835,7 @@ public class TestBinaryDocValuesUpdates 
     fieldTypes.disableSorting("bdv");
 
     // first segment with BDV
-    Document2 doc = writer.newDocument();
+    Document doc = writer.newDocument();
     doc.addAtom("id", "doc0");
     doc.addAtom("bdvmock", "mock-value");
     doc.addBinary("bdv", toBytes(5L));
@@ -884,7 +876,7 @@ public class TestBinaryDocValuesUpdates 
     fieldTypes.disableSorting("f");
     
     // nocommit use low schema API here:
-    Document2 doc = writer.newDocument();
+    Document doc = writer.newDocument();
     doc.addAtom("fmock", "mock-value");
     doc.addBinary("f", toBytes(5L));
     writer.addDocument(doc);
@@ -919,7 +911,7 @@ public class TestBinaryDocValuesUpdates 
 
     final int numDocs = atLeast(2000);
     for (int i = 0; i < numDocs; i++) {
-      Document2 doc = writer.newDocument();
+      Document doc = writer.newDocument();
       doc.addUniqueAtom("id", "doc" + i);
       double group = random().nextDouble();
       String g;
@@ -963,7 +955,7 @@ public class TestBinaryDocValuesUpdates 
               final String cf = "cf" + field;
 //              System.out.println("[" + Thread.currentThread().getName() + "] numUpdates=" + numUpdates + " updateTerm=" + t + " field=" + field);
               long updValue = random.nextInt();
-              Document2 update = writer.newDocument();
+              Document update = writer.newDocument();
               update.disableExistsField();
               update.addBinary(f, toBytes(updValue));
               update.addBinary(cf, toBytes(updValue*2));
@@ -1053,7 +1045,7 @@ public class TestBinaryDocValuesUpdates 
     fieldTypes.disableSorting("cf");
     final int numDocs = atLeast(10);
     for (int i = 0; i < numDocs; i++) {
-      Document2 doc = writer.newDocument();
+      Document doc = writer.newDocument();
       doc.addUniqueAtom("id", "doc" + i);
       long value = random().nextInt();
       doc.addBinary("f", toBytes(value));
@@ -1066,7 +1058,7 @@ public class TestBinaryDocValuesUpdates 
       int doc = random().nextInt(numDocs);
       Term t = new Term("id", "doc" + doc);
       long value = random().nextLong();
-      Document2 update = writer.newDocument();
+      Document update = writer.newDocument();
       update.disableExistsField();
       update.addBinary("f", toBytes(value));
       update.addBinary("cf", toBytes(value*2));
@@ -1100,7 +1092,7 @@ public class TestBinaryDocValuesUpdates 
     FieldTypes fieldTypes = writer.getFieldTypes();
     fieldTypes.disableSorting("f1");
     fieldTypes.disableSorting("f2");
-    Document2 doc = writer.newDocument();
+    Document doc = writer.newDocument();
     doc.addAtom("id", "d0");
     doc.addBinary("f1", toBytes(5L));
     doc.addBinary("f2", toBytes(13L));
@@ -1154,7 +1146,7 @@ public class TestBinaryDocValuesUpdates 
 
     // create first index
     for (int i = 0; i < numDocs; i++) {
-      Document2 doc = writer.newDocument();
+      Document doc = writer.newDocument();
       doc.addAtom("id", RandomPicks.randomFrom(random(), randomTerms));
       doc.addBinary("bdv", toBytes(4L));
       doc.addBinary("control", toBytes(8L));
@@ -1168,7 +1160,7 @@ public class TestBinaryDocValuesUpdates 
     // update some docs to a random value
     long value = random().nextInt();
     Term term = new Term("id", RandomPicks.randomFrom(random(), randomTerms));
-    Document2 update = writer.newDocument();
+    Document update = writer.newDocument();
     update.disableExistsField();
     update.addBinary("bdv", toBytes(value));
     update.addBinary("control", toBytes(value*2));
@@ -1213,7 +1205,7 @@ public class TestBinaryDocValuesUpdates 
     fieldTypes.disableSorting("f1");
     fieldTypes.disableSorting("f2");
     
-    Document2 doc = writer.newDocument();
+    Document doc = writer.newDocument();
     doc.addAtom("id", "d0");
     doc.addBinary("f1", toBytes(1L));
     doc.addBinary("f2", toBytes(1L));
@@ -1267,7 +1259,7 @@ public class TestBinaryDocValuesUpdates 
     
     // build a large index with many BDV fields and update terms
     for (int i = 0; i < numDocs; i++) {
-      Document2 doc = writer.newDocument();
+      Document doc = writer.newDocument();
       int numUpdateTerms = TestUtil.nextInt(random, 1, numTerms / 10);
       for (int j = 0; j < numUpdateTerms; j++) {
         doc.addAtom("upd", RandomPicks.randomFrom(random, updateTerms));
@@ -1291,7 +1283,7 @@ public class TestBinaryDocValuesUpdates 
       int field = random.nextInt(numBinaryFields);
       Term updateTerm = new Term("upd", RandomPicks.randomFrom(random, updateTerms));
       long value = random.nextInt();
-      Document2 update = writer.newDocument();
+      Document update = writer.newDocument();
       update.disableExistsField();
       update.addBinary("f" + field, toBytes(value));
       update.addBinary("cf" + field, toBytes(value*2));
@@ -1325,7 +1317,7 @@ public class TestBinaryDocValuesUpdates 
     fieldTypes.disableSorting("f1");
     fieldTypes.disableSorting("f2");
     
-    Document2 doc = writer.newDocument();
+    Document doc = writer.newDocument();
     doc.addAtom("upd", "t1");
     doc.addAtom("upd", "t2");
     doc.addBinary("f1", toBytes(1L));
@@ -1353,7 +1345,7 @@ public class TestBinaryDocValuesUpdates 
     FieldTypes fieldTypes = writer.getFieldTypes();
     fieldTypes.disableSorting("f1");
 
-    Document2 doc = writer.newDocument();
+    Document doc = writer.newDocument();
     doc.addAtom("id", "doc");
     doc.addBinary("f1", toBytes(1L));
     writer.addDocument(doc);
@@ -1379,7 +1371,7 @@ public class TestBinaryDocValuesUpdates 
     FieldTypes fieldTypes = writer.getFieldTypes();
     fieldTypes.disableSorting("f1");
     
-    Document2 doc = writer.newDocument();
+    Document doc = writer.newDocument();
     doc.addAtom("id", "doc");
     doc.addBinary("f1", toBytes(1L));
     writer.addDocument(doc);

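The TestBinaryDocValuesUpdates hunks all exercise the same two branch idioms: per-field schema tweaks go through FieldTypes before any document is added, and an in-place doc-values update is expressed as a writer-created document with the synthetic exists field disabled. A sketch using only calls that appear above (toBytes is this test's own helper; the writer call that actually applies the update document sits outside the quoted hunk context and is not reproduced here):

    FieldTypes fieldTypes = writer.getFieldTypes();
    fieldTypes.disableSorting("bdv");            // binary DV field, no sorted terms

    Document doc = writer.newDocument();
    doc.addAtom("id", "doc0");
    doc.addBinary("bdv", toBytes(5L));
    writer.addDocument(doc);

    // An update document carries only the changed values; disabling the
    // exists field keeps it from looking like a brand-new document.
    Document update = writer.newDocument();
    update.disableExistsField();
    update.addBinary("bdv", toBytes(value));     // 'value' as in the tests above
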
Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestBinaryTerms.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestBinaryTerms.java?rev=1642535&r1=1642534&r2=1642535&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestBinaryTerms.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestBinaryTerms.java Sun Nov 30 11:07:09 2014
@@ -19,11 +19,7 @@ package org.apache.lucene.index;
 
 import java.io.IOException;
 
-import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
@@ -45,7 +41,7 @@ public class TestBinaryTerms extends Luc
       bytes.bytes[0] = (byte) i;
       bytes.bytes[1] = (byte) (255 - i);
       bytes.length = 2;
-      Document2 doc = iw.newDocument();
+      Document doc = iw.newDocument();
       doc.addStored("id", "" + i);
       doc.addLargeText("bytes", tokenStream);
       iw.addDocument(doc);

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestCheckIndex.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestCheckIndex.java?rev=1642535&r1=1642534&r2=1642535&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestCheckIndex.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestCheckIndex.java Sun Nov 30 11:07:09 2014
@@ -26,12 +26,8 @@ import java.util.List;
 import org.apache.lucene.analysis.CannedTokenStream;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.Token;
-import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.FieldTypes;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.util.IOUtils;
@@ -50,7 +46,7 @@ public class TestCheckIndex extends Luce
     fieldTypes.enableTermVectorOffsets("field");
 
     for(int i=0;i<19;i++) {
-      Document2 doc = writer.newDocument();
+      Document doc = writer.newDocument();
       doc.addLargeText("field", "aaa"+i);
       writer.addDocument(doc);
     }
@@ -113,7 +109,7 @@ public class TestCheckIndex extends Luce
     fieldTypes.enableTermVectorOffsets("foo");
     fieldTypes.disableHighlighting("foo");
 
-    Document2 doc = iw.newDocument();
+    Document doc = iw.newDocument();
     doc.addLargeText("foo", new CannedTokenStream(
         new Token("bar", 5, 10), new Token("bar", 1, 4)
     ));

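[For the term-vector cases in TestCheckIndex above, the per-field switches now live on FieldTypes as well. A short sketch restricted to calls visible in this commit's hunks (enableTermVectors appears in a later file's hunk; the field name is illustrative); it reuses the imports and writer setup of the earlier sketch, plus java.io.IOException:

    static void addVectoredDoc(IndexWriter writer) throws IOException {
      FieldTypes fieldTypes = writer.getFieldTypes();
      fieldTypes.enableTermVectors("field");         // store term vectors for this field
      fieldTypes.enableTermVectorOffsets("field");   // include character offsets, as CheckIndex verifies above
      Document doc = writer.newDocument();
      doc.addLargeText("field", "aaa0");
      writer.addDocument(doc);
    }
]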
Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestCodecHoldsOpenFiles.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestCodecHoldsOpenFiles.java?rev=1642535&r1=1642534&r2=1642535&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestCodecHoldsOpenFiles.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestCodecHoldsOpenFiles.java Sun Nov 30 11:07:09 2014
@@ -19,9 +19,7 @@ package org.apache.lucene.index;
 
 import java.io.IOException;
 
-import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.store.BaseDirectoryWrapper;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
@@ -34,7 +32,7 @@ public class TestCodecHoldsOpenFiles ext
     RandomIndexWriter w = new RandomIndexWriter(random(), d);
     int numDocs = atLeast(100);
     for(int i=0;i<numDocs;i++) {
-      Document2 doc = w.newDocument();
+      Document doc = w.newDocument();
       doc.addLargeText("foo", "bar");
       w.addDocument(doc);
     }

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java?rev=1642535&r1=1642534&r2=1642535&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java Sun Nov 30 11:07:09 2014
@@ -27,10 +27,7 @@ import org.apache.lucene.analysis.MockAn
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.FieldsConsumer;
 import org.apache.lucene.codecs.FieldsProducer;
-import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.StringField;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
@@ -800,7 +797,7 @@ public class TestCodecs extends LuceneTe
     // we don't need many documents to assert this, but don't use one document either
     int numDocs = atLeast(random, 50);
     for (int i = 0; i < numDocs; i++) {
-      Document2 doc = writer.newDocument();
+      Document doc = writer.newDocument();
       doc.addAtom("f", "doc");
       writer.addDocument(doc);
     }

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java?rev=1642535&r1=1642534&r2=1642535&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java Sun Nov 30 11:07:09 2014
@@ -24,12 +24,8 @@ import java.util.concurrent.atomic.Atomi
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldTypes;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
@@ -94,12 +90,12 @@ public class TestConcurrentMergeSchedule
       }
 
       for(int j=0;j<20;j++) {
-        Document2 doc = writer.newDocument();
+        Document doc = writer.newDocument();
         doc.addInt("id", i*20+j);
         writer.addDocument(doc);
       }
 
-      Document2 doc = writer.newDocument();
+      Document doc = writer.newDocument();
       doc.addInt("id", i*20+19);
       // must cycle here because sometimes the merge flushes
       // the doc we just added and so there's nothing to
@@ -150,7 +146,7 @@ public class TestConcurrentMergeSchedule
         System.out.println("\nTEST: cycle");
       }
       for(int j=0;j<100;j++) {
-        Document2 doc = writer.newDocument();
+        Document doc = writer.newDocument();
         doc.addUniqueInt("id", i*100+j);
         writer.addDocument(doc);
       }
@@ -190,7 +186,7 @@ public class TestConcurrentMergeSchedule
       }
 
       for(int j=0;j<21;j++) {
-        Document2 doc = writer.newDocument();
+        Document doc = writer.newDocument();
         doc.addLargeText("content", "a b c");
         writer.addDocument(doc);
       }
@@ -228,7 +224,7 @@ public class TestConcurrentMergeSchedule
     for(int iter=0;iter<numIters;iter++) {
 
       for(int j=0;j<201;j++) {
-        Document2 doc = writer.newDocument();
+        Document doc = writer.newDocument();
         doc.addUniqueInt("id",iter*201+j);
         writer.addDocument(doc);
       }
@@ -328,7 +324,7 @@ public class TestConcurrentMergeSchedule
     tmp.setSegmentsPerTier(2);
 
     IndexWriter w = new IndexWriter(dir, iwc);
-    Document2 doc = w.newDocument();
+    Document doc = w.newDocument();
     doc.addLargeText("field", "field");
     while(enoughMergesWaiting.getCount() != 0 && !failed.get()) {
       for(int i=0;i<10;i++) {
@@ -377,7 +373,7 @@ public class TestConcurrentMergeSchedule
     FieldTypes fieldTypes = w.getFieldTypes();
 
     for(int i=0;i<1000;i++) {
-      Document2 doc = w.newDocument();
+      Document doc = w.newDocument();
       doc.addUniqueInt("id", i);
       w.addDocument(doc);
 
@@ -472,7 +468,7 @@ public class TestConcurrentMergeSchedule
         }
       });
     IndexWriter w = new IndexWriter(dir, iwc);
-    w.addDocument(new Document());
+    w.addDocument(w.newDocument());
     w.forceMerge(1);
     assertTrue(wasCalled.get());
 

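[The last hunk in this file is the telling one: w.addDocument(new Document()) becomes w.addDocument(w.newDocument()), i.e. even an empty document is created by the writer so that it is bound to the writer's FieldTypes schema. A sketch of an indexing loop under that convention, using only calls that appear in these hunks (writer setup as in the first sketch):

    static void indexIds(IndexWriter w) throws IOException {
      for (int id = 0; id < 100; id++) {
        Document doc = w.newDocument();
        doc.addUniqueInt("id", id);   // unique int key, as in the merge cycles above
        w.addDocument(doc);
      }
      w.forceMerge(1);                // exercises the merge paths under test
    }
]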
Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java?rev=1642535&r1=1642534&r2=1642535&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java Sun Nov 30 11:07:09 2014
@@ -20,14 +20,8 @@ package org.apache.lucene.index;
 import java.io.IOException;
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.FieldTypes;
-import org.apache.lucene.document.StoredField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.FailOnNonBulkMergesInfoStream;
 import org.apache.lucene.util.LuceneTestCase;
@@ -43,7 +37,7 @@ public class TestConsistentFieldNumbers 
                                                  .setMergePolicy(NoMergePolicy.INSTANCE));
 
     FieldTypes fieldTypes = writer.getFieldTypes();
-    Document2 d1 = writer.newDocument();
+    Document d1 = writer.newDocument();
     d1.addLargeText("f1", "first field");
     d1.addLargeText("f2", "second field");
     writer.addDocument(d1);
@@ -53,7 +47,7 @@ public class TestConsistentFieldNumbers 
                                      .setMergePolicy(NoMergePolicy.INSTANCE));
 
     fieldTypes = writer.getFieldTypes();
-    Document2 d2 = writer.newDocument();
+    Document d2 = writer.newDocument();
     fieldTypes.enableTermVectors("f1");
     d2.addLargeText("f2", "second field");
     d2.addLargeText("f1", "first field");
@@ -96,7 +90,7 @@ public class TestConsistentFieldNumbers 
         FieldTypes fieldTypes = writer.getFieldTypes();
         fieldTypes.disableExistsFilters();
 
-        Document2 d = writer.newDocument();
+        Document d = writer.newDocument();
         d.addLargeText("f1", "d1 first field");
         d.addLargeText("f2", "d1 second field");
         writer.addDocument(d);
@@ -112,7 +106,7 @@ public class TestConsistentFieldNumbers 
       {
         IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
                                                     .setMergePolicy(NoMergePolicy.INSTANCE));
-        Document2 d = writer.newDocument();
+        Document d = writer.newDocument();
         d.addLargeText("f1", "d2 first field");
         d.addStored("f3", new byte[] { 1, 2, 3 });
         writer.addDocument(d);
@@ -131,7 +125,7 @@ public class TestConsistentFieldNumbers 
       {
         IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
                                                     .setMergePolicy(NoMergePolicy.INSTANCE));
-        Document2 d = writer.newDocument();
+        Document d = writer.newDocument();
         d.addLargeText("f1", "d3 first field");
         d.addLargeText("f2", "d3 second field");
         d.addStored("f3", new byte[] { 1, 2, 3, 4, 5 });
@@ -199,7 +193,7 @@ public class TestConsistentFieldNumbers 
     }
 
     for (int i = 0; i < NUM_DOCS; i++) {
-      Document2 d = writer.newDocument();
+      Document d = writer.newDocument();
       for (int j = 0; j < docs[i].length; j++) {
         addField(fieldTypes, d, docs[i][j]);
       }
@@ -207,7 +201,7 @@ public class TestConsistentFieldNumbers 
       writer.addDocument(d);
     }
 
-    Document2 d = writer.newDocument();
+    Document d = writer.newDocument();
 
     for(int i=0;i<MAX_FIELDS;i++) {
       addField(fieldTypes, d, i);
@@ -230,15 +224,9 @@ public class TestConsistentFieldNumbers 
     dir.close();
   }
 
-  private void addField(FieldTypes fieldTypes, Document2 d, int number) {
+  private void addField(FieldTypes fieldTypes, Document d, int number) {
     String fieldName = "" + number;
 
-    FieldType customType15 = new FieldType(TextField.TYPE_NOT_STORED);
-    customType15.setTokenized(false);
-    customType15.setStoreTermVectors(true);
-    customType15.setStoreTermVectorOffsets(true);
-    customType15.setStoreTermVectorPositions(true);
-    
     int mode = number % 16;
     switch (mode) {
     case 0:

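[The deleted customType15 block shows the pattern this branch retires: a per-Field FieldType instance with tokenized/vector flags set on each use. On the branch the same intent is declared once on the writer's FieldTypes, with untokenized indexed values added via addAtom. A hedged sketch; only enableTermVectors and enableTermVectorOffsets appear in this commit's hunks (no positions switch is visible), the atom-for-untokenized mapping is inferred from the tests above, and the field name is illustrative:

    static void addUntokenizedVectoredField(IndexWriter writer) throws IOException {
      FieldTypes fieldTypes = writer.getFieldTypes();
      fieldTypes.enableTermVectors("15");
      fieldTypes.enableTermVectorOffsets("15");
      Document d = writer.newDocument();
      d.addAtom("15", "untokenized value");  // an atom is indexed as a single term
      writer.addDocument(d);
    }
]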
Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestCrash.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestCrash.java?rev=1642535&r1=1642534&r2=1642535&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestCrash.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestCrash.java Sun Nov 30 11:07:09 2014
@@ -21,9 +21,7 @@ import java.io.IOException;
 import java.util.Random;
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.store.NoLockFactory;
@@ -43,7 +41,7 @@ public class TestCrash extends LuceneTes
       writer.commit();
     }
     
-    Document2 doc = writer.newDocument();
+    Document doc = writer.newDocument();
     doc.addLargeText("content", "aaa");
     doc.addLargeText("id", "0");
     for(int i=0;i<157;i++) {

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestCrashCausesCorruptIndex.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestCrashCausesCorruptIndex.java?rev=1642535&r1=1642534&r2=1642535&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestCrashCausesCorruptIndex.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestCrashCausesCorruptIndex.java Sun Nov 30 11:07:09 2014
@@ -21,9 +21,7 @@ import java.io.IOException;
 import java.nio.file.Path;
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
@@ -125,8 +123,8 @@ public class TestCrashCausesCorruptIndex
   /**
    * Gets a document with content "my dog has fleas".
    */
-  private Document2 getDocument(IndexWriter w) {
-    Document2 document = w.newDocument();
+  private Document getDocument(IndexWriter w) {
+    Document document = w.newDocument();
     document.addLargeText(TEXT_FIELD, "my dog has fleas");
     return document;
   }

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestCustomNorms.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestCustomNorms.java?rev=1642535&r1=1642534&r2=1642535&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestCustomNorms.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestCustomNorms.java Sun Nov 30 11:07:09 2014
@@ -19,10 +19,7 @@ package org.apache.lucene.index;
 import java.io.IOException;
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.TextField;
 import org.apache.lucene.search.CollectionStatistics;
 import org.apache.lucene.search.TermStatistics;
 import org.apache.lucene.search.similarities.DefaultSimilarity;
@@ -53,7 +50,7 @@ public class TestCustomNorms extends Luc
     final LineFileDocs docs = new LineFileDocs(writer.w, random());
     int num = atLeast(100);
     for (int i = 0; i < num; i++) {
-      Document2 doc = docs.nextDoc();
+      Document doc = docs.nextDoc();
       float nextFloat = random().nextFloat();
       doc.addLargeText(floatTestField, "" + nextFloat, nextFloat);
       writer.addDocument(doc);
@@ -67,7 +64,7 @@ public class TestCustomNorms extends Luc
     NumericDocValues norms = open.getNormValues(floatTestField);
     assertNotNull(norms);
     for (int i = 0; i < open.maxDoc(); i++) {
-      Document2 document = open.document(i);
+      Document document = open.document(i);
       float expected = Float.parseFloat(document.getString(floatTestField));
       assertEquals(expected, Float.intBitsToFloat((int)norms.get(i)), 0.0f);
     }

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestDeletionPolicy.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestDeletionPolicy.java?rev=1642535&r1=1642534&r2=1642535&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestDeletionPolicy.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestDeletionPolicy.java Sun Nov 30 11:07:09 2014
@@ -26,9 +26,7 @@ import java.util.Map;
 import java.util.Set;
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.IndexSearcher;
@@ -741,14 +739,14 @@ public class TestDeletionPolicy extends 
   }
 
   private void addDocWithID(IndexWriter writer, int id) throws IOException {
-    Document2 doc = writer.newDocument();
+    Document doc = writer.newDocument();
     doc.addLargeText("content", "aaa");
     doc.addUniqueInt("id", id);
     writer.addDocument(doc);
   }
   
   private void addDoc(IndexWriter writer) throws IOException {
-    Document2 doc = writer.newDocument();
+    Document doc = writer.newDocument();
     doc.addLargeText("content", "aaa");
     writer.addDocument(doc);
   }

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestDemoParallelLeafReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestDemoParallelLeafReader.java?rev=1642535&r1=1642534&r2=1642535&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestDemoParallelLeafReader.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestDemoParallelLeafReader.java Sun Nov 30 11:07:09 2014
@@ -31,15 +31,11 @@ import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicLong;
 
-import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldTypes;
-import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
-import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.SortField;
@@ -684,8 +680,8 @@ public class TestDemoParallelLeafReader 
         // Slowly parse the stored field into a new doc values field:
         for(int i=0;i<maxDoc;i++) {
           // TODO: is this still O(blockSize^2)?
-          Document2 oldDoc = reader.document(i);
-          Document2 newDoc = w.newDocument();
+          Document oldDoc = reader.document(i);
+          Document newDoc = w.newDocument();
           long value = Long.parseLong(oldDoc.getString("text").split(" ")[1]);
           newDoc.addLong("number", value);
           w.addDocument(newDoc);
@@ -734,8 +730,8 @@ public class TestDemoParallelLeafReader 
           // Must slowly parse the stored field into a new doc values field:
           for(int i=0;i<maxDoc;i++) {
             // TODO: is this still O(blockSize^2)?
-            Document2 oldDoc = reader.document(i);
-            Document2 newDoc = w.newDocument();
+            Document oldDoc = reader.document(i);
+            Document newDoc = w.newDocument();
             long value = Long.parseLong(oldDoc.getString("text").split(" ")[1]);
             newDoc.addLong("number_" + newSchemaGen, value);
             newDoc.addLong("number", value);
@@ -747,8 +743,8 @@ public class TestDemoParallelLeafReader 
           assertNotNull("oldSchemaGen=" + oldSchemaGen, oldValues);
           for(int i=0;i<maxDoc;i++) {
             // TODO: is this still O(blockSize^2)?
-            Document2 oldDoc = reader.document(i);
-            Document2 newDoc = w.newDocument();
+            Document oldDoc = reader.document(i);
+            Document newDoc = w.newDocument();
             newDoc.addLong("number_" + newSchemaGen, oldValues.get(i));
             w.addDocument(newDoc);
           }
@@ -777,7 +773,7 @@ public class TestDemoParallelLeafReader 
         int maxDoc = r.maxDoc();
         boolean failed = false;
         for(int i=0;i<maxDoc;i++) {
-          Document2 oldDoc = r.document(i);
+          Document oldDoc = r.document(i);
           long value = Long.parseLong(oldDoc.getString("text").split(" ")[1]);
           if (value != numbers.get(i)) {
             if (DEBUG) System.out.println("FAIL: docID=" + i + " " + oldDoc+ " value=" + value + " number=" + numbers.get(i) + " numbers=" + numbers);
@@ -820,8 +816,8 @@ public class TestDemoParallelLeafReader 
           // Must slowly parse the stored field into a new doc values field:
           for(int i=0;i<maxDoc;i++) {
             // TODO: is this still O(blockSize^2)?
-            Document2 oldDoc = reader.document(i);
-            Document2 newDoc = w.newDocument();
+            Document oldDoc = reader.document(i);
+            Document newDoc = w.newDocument();
             long value = Long.parseLong(oldDoc.getString("text").split(" ")[1]);
             newDoc.addLong("number", newSchemaGen*value);
             w.addDocument(newDoc);
@@ -832,8 +828,8 @@ public class TestDemoParallelLeafReader 
           assertNotNull("oldSchemaGen=" + oldSchemaGen, oldValues);
           for(int i=0;i<maxDoc;i++) {
             // TODO: is this still O(blockSize^2)?
-            Document2 oldDoc = reader.document(i);
-            Document2 newDoc = w.newDocument();
+            Document oldDoc = reader.document(i);
+            Document newDoc = w.newDocument();
             newDoc.addLong("number", newSchemaGen*(oldValues.get(i)/oldSchemaGen));
             w.addDocument(newDoc);
           }
@@ -866,7 +862,7 @@ public class TestDemoParallelLeafReader 
         int maxDoc = r.maxDoc();
         boolean failed = false;
         for(int i=0;i<maxDoc;i++) {
-          Document2 oldDoc = r.document(i);
+          Document oldDoc = r.document(i);
           long value = Long.parseLong(oldDoc.getString("text").split(" ")[1]);
           value *= schemaGen;
           if (value != numbers.get(i)) {
@@ -889,7 +885,7 @@ public class TestDemoParallelLeafReader 
     ReindexingReader reindexer = getReindexerNewDVFields(createTempDir(), currentSchemaGen);
     reindexer.commit();
 
-    Document2 doc = reindexer.w.newDocument();
+    Document doc = reindexer.w.newDocument();
     doc.addLargeText("text", "number " + random().nextLong());
     reindexer.w.addDocument(doc);
 
@@ -963,7 +959,7 @@ public class TestDemoParallelLeafReader 
         reindexer = getReindexerNewDVFields(root, currentSchemaGen);
       }
 
-      Document2 doc = reindexer.w.newDocument();
+      Document doc = reindexer.w.newDocument();
       String id;
       String updateID;
       if (maxID > 0 && random().nextInt(10) == 7) {
@@ -1050,7 +1046,7 @@ public class TestDemoParallelLeafReader 
         reindexer = getReindexerSameDVField(root, currentSchemaGen, mergingSchemaGen);
       }
 
-      Document2 doc = reindexer.w.newDocument();
+      Document doc = reindexer.w.newDocument();
       String id;
       String updateID;
       if (maxID > 0 && random().nextInt(10) == 7) {
@@ -1126,7 +1122,7 @@ public class TestDemoParallelLeafReader 
           if (numbers != null) {
             int maxDoc = leaf.maxDoc();
             for(int i=0;i<maxDoc;i++) {
-              Document2 doc = leaf.document(i);
+              Document doc = leaf.document(i);
               long value = Long.parseLong(doc.getString("text").split(" ")[1]);
               long dvValue = numbers.get(i);
               if (value == 0) {
@@ -1147,7 +1143,7 @@ public class TestDemoParallelLeafReader 
     // Start with initial empty commit:
     reindexer.commit();
 
-    Document2 doc = reindexer.w.newDocument();
+    Document doc = reindexer.w.newDocument();
     doc.addLargeText("text", "number " + random().nextLong());
     reindexer.w.addDocument(doc);
 
@@ -1222,7 +1218,7 @@ public class TestDemoParallelLeafReader 
         reindexer = getReindexer(root);
       }
 
-      Document2 doc = reindexer.w.newDocument();
+      Document doc = reindexer.w.newDocument();
       String id;
       String updateID;
       if (maxID > 0 && random().nextInt(10) == 7) {
@@ -1290,7 +1286,7 @@ public class TestDemoParallelLeafReader 
     boolean failed = false;
     long t0 = System.currentTimeMillis();
     for(int i=0;i<maxDoc;i++) {
-      Document2 oldDoc = r.document(i);
+      Document oldDoc = r.document(i);
       long value = multiplier * Long.parseLong(oldDoc.getString("text").split(" ")[1]);
       if (value != numbers.get(i)) {
         System.out.println("FAIL: docID=" + i + " " + oldDoc+ " value=" + value + " number=" + numbers.get(i) + " numbers=" + numbers);
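[The reindexing loops patched throughout this file all follow one shape: read each document's stored field from the existing reader, parse out the number, and add it to a parallel index as a long doc-values field, keeping docID order aligned. A condensed sketch of exactly that loop; the method signature is an assumption (the hunks do not show the reader's concrete type), while the body mirrors the patched code:

    static void reindexNumbers(LeafReader reader, IndexWriter parallelWriter) throws IOException {
      int maxDoc = reader.maxDoc();
      for (int i = 0; i < maxDoc; i++) {
        Document oldDoc = reader.document(i);           // stored fields of the existing doc
        Document newDoc = parallelWriter.newDocument(); // schema comes from the parallel writer
        long value = Long.parseLong(oldDoc.getString("text").split(" ")[1]);
        newDoc.addLong("number", value);                // surfaces as numeric doc values
        parallelWriter.addDocument(newDoc);             // one-to-one with the old docIDs
      }
    }
]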