Posted to commits@lucene.apache.org by mi...@apache.org on 2014/10/28 10:04:07 UTC

svn commit: r1634820 [2/4] - in /lucene/dev/branches/lucene6005/lucene: core/src/java/org/apache/lucene/document/ core/src/java/org/apache/lucene/index/ core/src/java/org/apache/lucene/search/ core/src/test/org/apache/lucene/ core/src/test/org/apache/l...

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/TestDemo.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/TestDemo.java?rev=1634820&r1=1634819&r2=1634820&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/TestDemo.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/TestDemo.java Tue Oct 28 09:04:05 2014
@@ -21,13 +21,14 @@ import java.io.IOException;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.search.*;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
@@ -48,10 +49,10 @@ public class TestDemo extends LuceneTest
     // To store an index on disk, use this instead:
     // Directory directory = FSDirectory.open(new File("/tmp/testindex"));
     RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, analyzer);
-    Document doc = new Document();
+    Document2 doc = iwriter.newDocument();
     String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
     String text = "This is the text to be indexed. " + longTerm;
-    doc.add(newTextField("fieldname", text, Field.Store.YES));
+    doc.addLargeText("fieldname", text);
     iwriter.addDocument(doc);
     iwriter.close();
     

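For readers tracking the API change, the TestDemo hunk above shows the core pattern on this branch: the document is obtained from the writer (which owns the FieldTypes schema) rather than built from Field instances. Below is a minimal standalone sketch of that pattern, using only calls that appear in the hunk (newDocument, addLargeText); the class name, RAMDirectory, the MockAnalyzer-with-Random setup, and the field values are illustrative, not part of the patch.

    import java.util.Random;

    import org.apache.lucene.analysis.MockAnalyzer;
    import org.apache.lucene.document.Document2;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class Document2IndexingSketch {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(new Random())));

        // The writer hands out the document, so the field schema (FieldTypes) travels with it:
        Document2 doc = w.newDocument();
        doc.addLargeText("fieldname", "This is the text to be indexed.");
        w.addDocument(doc);

        w.close();
        dir.close();
      }
    }
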
Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/TestExternalCodecs.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/TestExternalCodecs.java?rev=1634820&r1=1634819&r2=1634820&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/TestExternalCodecs.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/TestExternalCodecs.java Tue Oct 28 09:04:05 2014
@@ -20,8 +20,10 @@ package org.apache.lucene;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.codecs.asserting.AssertingCodec;
+import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
@@ -38,24 +40,6 @@ import org.apache.lucene.util.TestUtil;
 
 public class TestExternalCodecs extends LuceneTestCase {
 
-  private static final class CustomPerFieldCodec extends AssertingCodec {
-    
-    private final PostingsFormat ramFormat = PostingsFormat.forName("RAMOnly");
-    private final PostingsFormat defaultFormat = TestUtil.getDefaultPostingsFormat();
-    private final PostingsFormat memoryFormat = PostingsFormat.forName("Memory");
-
-    @Override
-    public PostingsFormat getPostingsFormatForField(String field) {
-      if (field.equals("field2") || field.equals("id")) {
-        return memoryFormat;
-      } else if (field.equals("field1")) {
-        return defaultFormat;
-      } else {
-        return ramFormat;
-      }
-    }
-  }
-
   // tests storing "id" and "field2" fields as pulsing codec,
   // whose term sort is backwards unicode code point, and
   // storing "field1" as a custom entirely-in-RAM codec
@@ -70,22 +54,21 @@ public class TestExternalCodecs extends 
     dir.setCheckIndexOnClose(false); // we use a custom codec provider
     IndexWriter w = new IndexWriter(
         dir,
-        newIndexWriterConfig(new MockAnalyzer(random())).
-        setCodec(new CustomPerFieldCodec()).
-            setMergePolicy(newLogMergePolicy(3))
+        newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy(3))
     );
-    Document doc = new Document();
-    // uses default codec:
-    doc.add(newTextField("field1", "this field uses the standard codec as the test", Field.Store.NO));
-    // uses memory codec:
-    Field field2 = newTextField("field2", "this field uses the memory codec as the test", Field.Store.NO);
-    doc.add(field2);
-    
-    Field idField = newStringField("id", "", Field.Store.NO);
 
-    doc.add(idField);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setPostingsFormat("id", "RAMOnly");
+    fieldTypes.setPostingsFormat("field1", TestUtil.getDefaultPostingsFormat().getName());
+    fieldTypes.setPostingsFormat("field2", "Memory");
+    
     for(int i=0;i<NUM_DOCS;i++) {
-      idField.setStringValue(""+i);
+      Document2 doc = w.newDocument();
+      // uses default codec:
+      doc.addLargeText("field1", "this field uses the standard codec as the test");
+      // uses memory codec:
+      doc.addLargeText("field2", "this field uses the memory codec as the test");
+      doc.addAtom("id", ""+i);
       w.addDocument(doc);
       if ((i+1)%10 == 0) {
         w.commit();

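The TestExternalCodecs hunk drops the custom AssertingCodec subclass (and its getPostingsFormatForField override) and instead declares per-field postings formats on the writer's FieldTypes. The following sketch shows just that configuration step under the same branch API; the format name "Memory" mirrors the test (which also uses "RAMOnly" and the default format for other fields), and the rest of the setup is illustrative.

    import java.util.Random;

    import org.apache.lucene.analysis.MockAnalyzer;
    import org.apache.lucene.document.Document2;
    import org.apache.lucene.document.FieldTypes;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class PerFieldPostingsSketch {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(new Random())));

        // Postings formats are now part of the per-field schema, not a custom codec:
        FieldTypes fieldTypes = w.getFieldTypes();
        fieldTypes.setPostingsFormat("id", "Memory");

        Document2 doc = w.newDocument();
        doc.addLargeText("field1", "this field uses the default postings format");
        doc.addAtom("id", "0");
        w.addDocument(doc);

        w.close();
        dir.close();
      }
    }
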
Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/TestMergeSchedulerExternal.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/TestMergeSchedulerExternal.java?rev=1634820&r1=1634819&r2=1634820&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/TestMergeSchedulerExternal.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/TestMergeSchedulerExternal.java Tue Oct 28 09:04:05 2014
@@ -16,15 +16,18 @@ package org.apache.lucene;
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+import java.io.IOException;
+
 import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.ConcurrentMergeScheduler;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.LogMergePolicy;
-import org.apache.lucene.index.MergePolicy;
 import org.apache.lucene.index.MergePolicy.OneMerge;
+import org.apache.lucene.index.MergePolicy;
 import org.apache.lucene.index.MergeScheduler;
 import org.apache.lucene.index.MergeTrigger;
 import org.apache.lucene.store.Directory;
@@ -32,8 +35,6 @@ import org.apache.lucene.store.MockDirec
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
 
-import java.io.IOException;
-
 /**
  * Holds tests cases to verify external APIs are accessible
  * while not being in org.apache.lucene.index package.
@@ -89,18 +90,19 @@ public class TestMergeSchedulerExternal 
     MockDirectoryWrapper dir = newMockDirectory();
     dir.failOn(new FailOnlyOnMerge());
 
-    Document doc = new Document();
-    Field idField = newStringField("id", "", Field.Store.YES);
-    doc.add(idField);
-    
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
         .setMergeScheduler(new MyMergeScheduler())
         .setMaxBufferedDocs(2).setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
         .setMergePolicy(newLogMergePolicy()));
+
+    Document2 doc = writer.newDocument();
+    doc.addAtom("id", "");
+    
     LogMergePolicy logMP = (LogMergePolicy) writer.getConfig().getMergePolicy();
     logMP.setMergeFactor(10);
-    for(int i=0;i<20;i++)
+    for(int i=0;i<20;i++) {
       writer.addDocument(doc);
+    }
 
     ((MyMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
     writer.close();
@@ -137,9 +139,9 @@ public class TestMergeSchedulerExternal 
     IndexWriterConfig conf = new IndexWriterConfig(null);
     conf.setMergeScheduler(new ReportingMergeScheduler());
     IndexWriter writer = new IndexWriter(dir, conf);
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.commit(); // trigger flush
-    writer.addDocument(new Document());
+    writer.addDocument(writer.newDocument());
     writer.commit(); // trigger flush
     writer.forceMerge(1);
     writer.close();

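A side effect visible in the TestMergeSchedulerExternal hunks: since documents now come from the writer, document construction has to move below the IndexWriter constructor, and even an empty document is writer.newDocument() rather than new Document(). A tiny sketch of the flush/merge sequence from the second hunk follows; the null analyzer matches the test (no analyzed fields are added) and the directory choice is illustrative.

    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class WriterFirstSketch {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(null));

        writer.addDocument(writer.newDocument()); // empty document, created by the writer
        writer.commit();                          // trigger a flush
        writer.addDocument(writer.newDocument());
        writer.commit();                          // trigger a second flush
        writer.forceMerge(1);                     // merge the two segments

        writer.close();
        dir.close();
      }
    }
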
Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/TestSearch.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/TestSearch.java?rev=1634820&r1=1634819&r2=1634820&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/TestSearch.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/TestSearch.java Tue Oct 28 09:04:05 2014
@@ -46,8 +46,8 @@ public class TestSearch extends LuceneTe
       
       IndexWriter writer = new IndexWriter(directory, conf);
       try {
-        Document d = new Document();
-        d.add(newTextField("foo", "bar", Field.Store.YES));
+        Document2 d = writer.newDocument();
+        d.addLargeText("foo", "bar");
         writer.addDocument(d);
       } finally {
         writer.close();
@@ -125,9 +125,9 @@ public class TestSearch extends LuceneTe
         "a c e a b c"
       };
       for (int j = 0; j < docs.length; j++) {
-        Document d = new Document();
-        d.add(newTextField("contents", docs[j], Field.Store.YES));
-        d.add(new IntField("id", j, Field.Store.NO));
+        Document2 d = writer.newDocument();
+        d.addLargeText("contents", docs[j]);
+        d.addInt("id", j);
         writer.addDocument(d);
       }
       writer.close();

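In the TestSearch hunk, numeric fields follow the same pattern: addInt replaces the IntField instance and its Field.Store flag. A brief sketch under the same assumptions as the earlier ones; the document contents are illustrative.

    import java.util.Random;

    import org.apache.lucene.analysis.MockAnalyzer;
    import org.apache.lucene.document.Document2;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class AddIntSketch {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(new Random())));

        String[] docs = {"a b c d e", "a c e a b c"};
        for (int j = 0; j < docs.length; j++) {
          Document2 d = writer.newDocument();
          d.addLargeText("contents", docs[j]);
          d.addInt("id", j); // replaces new IntField("id", j, Field.Store.NO)
          writer.addDocument(d);
        }

        writer.close();
        dir.close();
      }
    }
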
Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java?rev=1634820&r1=1634819&r2=1634820&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java Tue Oct 28 09:04:05 2014
@@ -79,10 +79,9 @@ public class TestSearchForDuplicates ext
       }
 
       for (int j = 0; j < MAX_DOCS; j++) {
-        Document d = new Document();
-        d.add(newTextField(PRIORITY_FIELD, HIGH_PRIORITY, Field.Store.YES));
-        d.add(new IntField(ID_FIELD, j, Field.Store.YES));
-        d.add(new NumericDocValuesField(ID_FIELD, j));
+        Document2 d = writer.newDocument();
+        d.addLargeText(PRIORITY_FIELD, HIGH_PRIORITY);
+        d.addInt(ID_FIELD, j);
         writer.addDocument(d);
       }
       writer.close();

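The TestSearchForDuplicates hunk also drops the separate NumericDocValuesField: a single addInt appears to carry the doc values needed for sorting (elsewhere in this commit, TestDocument2 sorts directly on fields added this way via fieldTypes.newSort). A hedged sketch of indexing and then sorting on such a field; the field names, the searcher setup, and the values are illustrative, and the assumption that no explicit enableSorting call is needed for a single-valued int comes from the TestDocument2 hunks below.

    import java.util.Random;

    import org.apache.lucene.analysis.MockAnalyzer;
    import org.apache.lucene.document.Document2;
    import org.apache.lucene.document.FieldTypes;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.MatchAllDocsQuery;
    import org.apache.lucene.search.TopDocs;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class SortByIntSketch {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(new Random())));
        FieldTypes fieldTypes = writer.getFieldTypes();

        for (int j = 0; j < 10; j++) {
          Document2 d = writer.newDocument();
          d.addLargeText("priority", "high");
          d.addInt("id", j); // one call; no extra NumericDocValuesField needed
          writer.addDocument(d);
        }

        IndexReader r = DirectoryReader.open(writer, true);
        IndexSearcher s = new IndexSearcher(r);
        TopDocs hits = s.search(new MatchAllDocsQuery(), 10, fieldTypes.newSort("id"));
        System.out.println("hits sorted by id: " + hits.totalHits);

        r.close();
        writer.close();
        dir.close();
      }
    }
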
Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java?rev=1634820&r1=1634819&r2=1634820&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java Tue Oct 28 09:04:05 2014
@@ -20,13 +20,14 @@ package org.apache.lucene.analysis;
 
 import java.io.IOException;
 
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiFields;
-import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
@@ -38,7 +39,7 @@ public class TestCachingTokenFilter exte
   public void testCaching() throws IOException {
     Directory dir = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
-    Document doc = new Document();
+    Document2 doc = writer.newDocument();
     TokenStream stream = new TokenStream() {
       private int index = 0;
       private CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
@@ -60,7 +61,7 @@ public class TestCachingTokenFilter exte
     
     stream = new CachingTokenFilter(stream);
     
-    doc.add(new TextField("preanalyzed", stream));
+    doc.addLargeText("preanalyzed", stream);
     
     // 1) we consume all tokens twice before we add the doc to the index
     checkTokens(stream);

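The TestCachingTokenFilter hunk shows that pre-analyzed content is handed straight to addLargeText as a TokenStream, replacing doc.add(new TextField(name, stream)). The sketch below uses the test framework's CannedTokenStream and Token classes to stand in for the hand-rolled TokenStream in the test; those helpers, the field name, and the token offsets are illustrative.

    import java.util.Random;

    import org.apache.lucene.analysis.CannedTokenStream;
    import org.apache.lucene.analysis.MockAnalyzer;
    import org.apache.lucene.analysis.Token;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.document.Document2;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class PreAnalyzedSketch {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(new Random())));

        // Already-tokenized content, as the test builds by hand:
        TokenStream stream = new CannedTokenStream(new Token("term1", 0, 5), new Token("term2", 6, 11));

        Document2 doc = writer.newDocument();
        doc.addLargeText("preanalyzed", stream); // replaces doc.add(new TextField("preanalyzed", stream))
        writer.addDocument(doc);

        writer.close();
        dir.close();
      }
    }
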
Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java?rev=1634820&r1=1634819&r2=1634820&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java Tue Oct 28 09:04:05 2014
@@ -22,22 +22,24 @@ import java.io.StringReader;
 import java.util.Arrays;
 import java.util.Random;
 
+import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
-import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.TestUtil;
-import org.apache.lucene.util.automaton.AutomatonTestUtil;
 import org.apache.lucene.util.automaton.Automata;
-import org.apache.lucene.util.automaton.Operations;
+import org.apache.lucene.util.automaton.AutomatonTestUtil;
 import org.apache.lucene.util.automaton.CharacterRunAutomaton;
+import org.apache.lucene.util.automaton.Operations;
 import org.apache.lucene.util.automaton.RegExp;
 
 public class TestMockAnalyzer extends BaseTokenStreamTestCase {
@@ -302,17 +304,21 @@ public class TestMockAnalyzer extends Ba
       }
     };
 
-    final RandomIndexWriter writer = new RandomIndexWriter(random(), newDirectory());
-    final Document doc = new Document();
-    final FieldType ft = new FieldType();
-    ft.setIndexOptions(IndexOptions.DOCS_ONLY);
-    ft.setTokenized(true);
-    ft.setStoreTermVectors(true);
-    ft.setStoreTermVectorPositions(true);
-    ft.setStoreTermVectorOffsets(true);
-    doc.add(new Field("f", "a", ft));
-    doc.add(new Field("f", "a", ft));
-    writer.addDocument(doc, a);
+    final RandomIndexWriter writer = new RandomIndexWriter(random(), newDirectory(), a);
+    FieldTypes fieldTypes = writer.getFieldTypes();
+
+    final Document2 doc = writer.newDocument();
+
+    fieldTypes.enableTermVectors("f");
+    fieldTypes.enableTermVectorPositions("f");
+    fieldTypes.enableTermVectorOffsets("f");
+    fieldTypes.setIndexOptions("f", IndexOptions.DOCS_ONLY);
+    fieldTypes.setMultiValued("f");
+
+    doc.addAtom("f", "a");
+    doc.addAtom("f", "a");
+    writer.addDocument(doc);
+
     final LeafReader reader = getOnlySegmentReader(writer.getReader());
     final Fields fields = reader.getTermVectors(0);
     final Terms terms = fields.terms("f");

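In the TestMockAnalyzer hunk, the per-Field FieldType settings (term vectors, positions, offsets, index options) become per-field schema configuration on FieldTypes, and repeating a field in one document now requires setMultiValued. A sketch of that configuration, restricted to the calls visible in the hunk; the writer setup is illustrative.

    import java.util.Random;

    import org.apache.lucene.analysis.MockAnalyzer;
    import org.apache.lucene.document.Document2;
    import org.apache.lucene.document.FieldTypes;
    import org.apache.lucene.index.FieldInfo.IndexOptions;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class TermVectorSchemaSketch {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(new Random())));

        FieldTypes fieldTypes = writer.getFieldTypes();
        fieldTypes.enableTermVectors("f");
        fieldTypes.enableTermVectorPositions("f");
        fieldTypes.enableTermVectorOffsets("f");
        fieldTypes.setIndexOptions("f", IndexOptions.DOCS_ONLY);
        fieldTypes.setMultiValued("f"); // the same field is added twice below

        Document2 doc = writer.newDocument();
        doc.addAtom("f", "a");
        doc.addAtom("f", "a");
        writer.addDocument(doc);

        writer.close();
        dir.close();
      }
    }
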
Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/codecs/compressing/TestCompressingStoredFieldsFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/codecs/compressing/TestCompressingStoredFieldsFormat.java?rev=1634820&r1=1634819&r2=1634820&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/codecs/compressing/TestCompressingStoredFieldsFormat.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/codecs/compressing/TestCompressingStoredFieldsFormat.java Tue Oct 28 09:04:05 2014
@@ -21,9 +21,10 @@ import java.io.IOException;
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.document.Document2;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.IntField;
 import org.apache.lucene.index.BaseStoredFieldsFormatTestCase;
@@ -32,7 +33,6 @@ import org.apache.lucene.index.IndexWrit
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.junit.Test;
-
 import com.carrotsearch.randomizedtesting.annotations.Repeat;
 import com.carrotsearch.randomizedtesting.generators.RandomInts;
 
@@ -61,25 +61,14 @@ public class TestCompressingStoredFields
     // Cannot use RIW because this test wants CFS to stay off:
     IndexWriter iw = new IndexWriter(dir, iwConf);
 
-    final Document validDoc = new Document();
-    validDoc.add(new IntField("id", 0, Store.YES));
+    Document2 validDoc = iw.newDocument();
+    validDoc.addInt("id", 0);
     iw.addDocument(validDoc);
     iw.commit();
     
     // make sure that #writeField will fail to trigger an abort
-    final Document invalidDoc = new Document();
-    FieldType fieldType = new FieldType();
-    fieldType.setStored(true);
-    invalidDoc.add(new Field("invalid", fieldType) {
-      
-      @Override
-      public String stringValue() {
-        // TODO: really bad & scary that this causes IW to
-        // abort the segment!!  We should fix this.
-        return null;
-      }
-      
-    });
+    Document2 invalidDoc = iw.newDocument();
+    invalidDoc.addStored("invalid", (String) null);
     
     try {
       iw.addDocument(invalidDoc);

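The TestCompressingStoredFieldsFormat hunk collapses the anonymous Field subclass with a null stringValue into addStored("invalid", (String) null) to provoke the segment abort, which also shows the ordinary stored-only API: addStored with a String or a BytesRef. A small sketch of the valid usage follows; field names and values are illustrative.

    import java.util.Random;

    import org.apache.lucene.analysis.MockAnalyzer;
    import org.apache.lucene.document.Document2;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.BytesRef;

    public class AddStoredSketch {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(new Random())));

        Document2 doc = iw.newDocument();
        doc.addInt("id", 0);
        doc.addStored("notes", "stored-only field, String variant as in the hunk above");
        doc.addStored("bytes", new BytesRef(new byte[7])); // BytesRef variant, as in TestDocument2 below
        iw.addDocument(doc);
        iw.commit();

        iw.close();
        dir.close();
      }
    }
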
Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/document/TestDocument2.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/document/TestDocument2.java?rev=1634820&r1=1634819&r2=1634820&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/document/TestDocument2.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/document/TestDocument2.java Tue Oct 28 09:04:05 2014
@@ -18,9 +18,7 @@ package org.apache.lucene.document;
  */
 
 import java.io.StringReader;
-import java.util.Arrays;
 
-import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.CannedTokenStream;
 import org.apache.lucene.analysis.MockAnalyzer;
@@ -43,32 +41,25 @@ import org.apache.lucene.search.IndexSea
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.Sort;
-import org.apache.lucene.search.SortField;
-import org.apache.lucene.search.SortedNumericSortField;
-import org.apache.lucene.search.SortedSetSortField;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
-import org.junit.Ignore;
+import org.apache.lucene.util.Version;
 
 public class TestDocument2 extends LuceneTestCase {
 
   public void testBasic() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
 
-    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
-    types.setIndexWriter(w);
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
 
-    Document2 doc = new Document2(types);
+    Document2 doc = w.newDocument();
     doc.addLargeText("body", "some text");
     doc.addShortText("title", "a title");
     doc.addAtom("id", "29jafnn");
-    doc.addStored("bytes", new byte[7]);
+    doc.addStored("bytes", new BytesRef(new byte[7]));
     doc.addInt("int", 17);
     w.addDocument(doc);
     w.close();
@@ -77,18 +68,16 @@ public class TestDocument2 extends Lucen
 
   public void testBinaryAtom() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
 
-    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
-    types.setIndexWriter(w);
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
 
-    Document2 doc = new Document2(types);
-    doc.addAtom("binary", new byte[5]);
+    Document2 doc = w.newDocument();
+    doc.addAtom("binary", new BytesRef(new byte[5]));
     w.addDocument(doc);
     IndexReader r = DirectoryReader.open(w, true);
     IndexSearcher s = newSearcher(r);
-    assertEquals(1, s.search(types.newTermQuery("binary", new byte[5]), 1).totalHits);
+    assertEquals(1, s.search(fieldTypes.newTermQuery("binary", new byte[5]), 1).totalHits);
     r.close();
     w.close();
     dir.close();
@@ -96,30 +85,28 @@ public class TestDocument2 extends Lucen
 
   public void testBinaryAtomSort() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
 
-    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
-    types.setIndexWriter(w);
-    types.enableStored("id");
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableStored("id");
     // Sort reverse by default:
-    types.enableSorting("binary", true);
+    fieldTypes.enableSorting("binary", true);
 
-    Document2 doc = new Document2(types);
+    Document2 doc = w.newDocument();
     byte[] value = new byte[5];
     value[0] = 1;
     doc.addAtom("id", "0");
-    doc.addAtom("binary", value);
+    doc.addAtom("binary", new BytesRef(value));
     w.addDocument(doc);
 
-    doc = new Document2(types);
+    doc = w.newDocument();
     doc.addAtom("id", "1");
-    doc.addAtom("binary", new byte[5]);
+    doc.addAtom("binary", new BytesRef(new byte[5]));
     w.addDocument(doc);
     
     IndexReader r = DirectoryReader.open(w, true);
     IndexSearcher s = newSearcher(r);
-    TopDocs hits = s.search(new MatchAllDocsQuery(), 2, types.newSort("binary"));
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 2, fieldTypes.newSort("binary"));
     assertEquals(2, hits.scoreDocs.length);
     assertEquals("0", r.document(hits.scoreDocs[0].doc).get("id"));
     assertEquals("1", r.document(hits.scoreDocs[1].doc).get("id"));
@@ -130,17 +117,13 @@ public class TestDocument2 extends Lucen
 
   public void testBinaryStored() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
 
-    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
-    types.setIndexWriter(w);
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
 
-    Document2 doc = new Document2(types);
-    doc.addStored("binary", new byte[5]);
+    Document2 doc = w.newDocument();
+    doc.addStored("binary", new BytesRef(new byte[5]));
     w.addDocument(doc);
     IndexReader r = DirectoryReader.open(w, true);
-    IndexSearcher s = newSearcher(r);
     assertEquals(new BytesRef(new byte[5]), r.document(0).getBinaryValue("binary"));
     r.close();
     w.close();
@@ -149,14 +132,12 @@ public class TestDocument2 extends Lucen
 
   public void testSortedSetDocValues() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
 
-    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
-    types.setIndexWriter(w);
-    types.setDocValuesType("sortedset", DocValuesType.SORTED_SET);
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setDocValuesType("sortedset", DocValuesType.SORTED_SET);
 
-    Document2 doc = new Document2(types);
+    Document2 doc = w.newDocument();
     doc.addAtom("sortedset", "one");
     doc.addAtom("sortedset", "two");
     doc.addAtom("sortedset", "three");
@@ -185,14 +166,12 @@ public class TestDocument2 extends Lucen
 
   public void testSortedNumericDocValues() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
 
-    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
-    types.setIndexWriter(w);
-    types.setDocValuesType("sortednumeric", DocValuesType.SORTED_NUMERIC);
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setDocValuesType("sortednumeric", DocValuesType.SORTED_NUMERIC);
 
-    Document2 doc = new Document2(types);
+    Document2 doc = w.newDocument();
     doc.addInt("sortednumeric", 3);
     doc.addInt("sortednumeric", 1);
     doc.addInt("sortednumeric", 2);
@@ -212,26 +191,24 @@ public class TestDocument2 extends Lucen
 
   public void testFloatRangeQuery() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
 
-    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
-    types.setIndexWriter(w);
-    types.enableStored("id");
-    types.enableSorting("id");
-    //System.out.println("id type: " + types.getFieldType("id"));
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableStored("id");
+    fieldTypes.enableSorting("id");
+    //System.out.println("id type: " + fieldTypes.getFieldType("id"));
 
-    Document2 doc = new Document2(types);
+    Document2 doc = w.newDocument();
     doc.addFloat("float", 3f);
     doc.addAtom("id", "one");
     w.addDocument(doc);
 
-    doc = new Document2(types);
+    doc = w.newDocument();
     doc.addFloat("float", 2f);
     doc.addAtom("id", "two");
     w.addDocument(doc);
 
-    doc = new Document2(types);
+    doc = w.newDocument();
     doc.addFloat("float", 7f);
     doc.addAtom("id", "three");
     w.addDocument(doc);
@@ -240,12 +217,12 @@ public class TestDocument2 extends Lucen
     IndexSearcher s = newSearcher(r);
 
     // Make sure range query hits the right number of hits
-    assertEquals(2, s.search(types.newRangeQuery("float", 0f, true, 3f, true), 1).totalHits);
-    assertEquals(3, s.search(types.newRangeQuery("float", 0f, true, 10f, true), 1).totalHits);
-    assertEquals(1, s.search(types.newRangeQuery("float", 1f, true,2.5f, true), 1).totalHits);
+    assertEquals(2, s.search(fieldTypes.newRangeQuery("float", 0f, true, 3f, true), 1).totalHits);
+    assertEquals(3, s.search(fieldTypes.newRangeQuery("float", 0f, true, 10f, true), 1).totalHits);
+    assertEquals(1, s.search(fieldTypes.newRangeQuery("float", 1f, true,2.5f, true), 1).totalHits);
 
     // Make sure doc values shows the correct float values:
-    TopDocs hits = s.search(new MatchAllDocsQuery(), 3, types.newSort("id"));
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 3, fieldTypes.newSort("id"));
     assertEquals(3, hits.totalHits);
     NumericDocValues ndv = MultiDocValues.getNumericValues(r, "float");
     assertNotNull(ndv);
@@ -265,7 +242,7 @@ public class TestDocument2 extends Lucen
     assertEquals(2f, Float.intBitsToFloat((int) ndv.get(hit.doc)), .001f);
 
     // Make sure we can sort by the field:
-    hits = s.search(new MatchAllDocsQuery(), 3, types.newSort("float"));
+    hits = s.search(new MatchAllDocsQuery(), 3, fieldTypes.newSort("float"));
     assertEquals(3, hits.totalHits);
     assertEquals("two", r.document(hits.scoreDocs[0].doc).get("id"));
     assertEquals("one", r.document(hits.scoreDocs[1].doc).get("id"));
@@ -279,13 +256,10 @@ public class TestDocument2 extends Lucen
   // Cannot change a field from INT to DOUBLE
   public void testInvalidNumberTypeChange() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
 
-    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
-    types.setIndexWriter(w);
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
 
-    Document2 doc = new Document2(types);
+    Document2 doc = w.newDocument();
     doc.addInt("int", 3);
     w.addDocument(doc);
     w.close();
@@ -294,57 +268,42 @@ public class TestDocument2 extends Lucen
 
   public void testIntRangeQuery() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
 
-    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
-    types.setIndexWriter(w);
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
 
-    Document2 doc = new Document2(types);
+    Document2 doc = w.newDocument();
     doc.addInt("int", 3);
     w.addDocument(doc);
 
-    doc = new Document2(types);
+    doc = w.newDocument();
     doc.addInt("int", 2);
     w.addDocument(doc);
 
-    doc = new Document2(types);
+    doc = w.newDocument();
     doc.addInt("int", 7);
     w.addDocument(doc);
 
     IndexReader r = DirectoryReader.open(w, true);
     IndexSearcher s = newSearcher(r);
 
-    assertEquals(2, s.search(types.newRangeQuery("int", 0, true, 3, true), 1).totalHits);
-    assertEquals(3, s.search(types.newRangeQuery("int", 0, true, 10, true), 1).totalHits);
+    assertEquals(2, s.search(fieldTypes.newRangeQuery("int", 0, true, 3, true), 1).totalHits);
+    assertEquals(3, s.search(fieldTypes.newRangeQuery("int", 0, true, 10, true), 1).totalHits);
     w.close();
     r.close();
     dir.close();
   }
 
-  public void testExcMissingSetIndexWriter() throws Exception {
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
-    Document2 doc = new Document2(types);
-    try {
-      doc.addLargeText("body", "some text");
-      fail("did not hit expected exception");
-    } catch (IllegalStateException ise) {
-      // expected
-    }
-  }
-
   public void testExcAnalyzerForAtomField() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
-    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
-    types.setIndexWriter(w);
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
     try {
-      types.setAnalyzer("atom", a);
+      fieldTypes.setAnalyzer("atom", new MockAnalyzer(random()));
       fail("did not hit expected exception");
     } catch (IllegalStateException ise) {
-      // expected
+      // Expected
+      assertEquals("wrong exception message: " + ise.getMessage(), "field \"atom\": can only setIndexAnalyzer if the field is indexed", ise.getMessage());
     }
     w.close();
     dir.close();
@@ -353,17 +312,16 @@ public class TestDocument2 extends Lucen
   // Can't ask for SORTED dv but then add the field as a number
   public void testExcInvalidDocValuesTypeFirst() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
-    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
-    types.setIndexWriter(w);
-    types.setDocValuesType("string", DocValuesType.SORTED);
-    Document2 doc = new Document2(types);
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setDocValuesType("string", DocValuesType.SORTED);
+    Document2 doc = w.newDocument();
     try {
       doc.addInt("string", 17);
       fail("did not hit expected exception");
     } catch (IllegalStateException ise) {
-      // expected
+      // Expected
+      assertEquals("wrong exception message: " + ise.getMessage(), "field \"string\": type INT must use NUMERIC docValuesType (got: SORTED)", ise.getMessage());
     }
     doc.addAtom("string", "a string");
     w.addDocument(doc);
@@ -374,19 +332,18 @@ public class TestDocument2 extends Lucen
   // Can't ask for BINARY dv but then add the field as a number
   public void testExcInvalidBinaryDocValuesTypeFirst() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
-    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
-    types.setIndexWriter(w);
-    types.setDocValuesType("binary", DocValuesType.BINARY);
-    Document2 doc = new Document2(types);
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setDocValuesType("binary", DocValuesType.BINARY);
+    Document2 doc = w.newDocument();
     try {
       doc.addInt("binary", 17);
       fail("did not hit expected exception");
     } catch (IllegalStateException ise) {
-      // expected
+      // Expected
+      assertEquals("wrong exception message: " + ise.getMessage(), "field \"binary\": type INT must use NUMERIC docValuesType (got: BINARY)", ise.getMessage());
     }
-    doc.addAtom("binary", new byte[7]);
+    doc.addAtom("binary", new BytesRef(new byte[7]));
     w.addDocument(doc);
     w.close();
     dir.close();
@@ -395,17 +352,16 @@ public class TestDocument2 extends Lucen
   // Cannot store Reader:
   public void testExcStoreReaderFields() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
-    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
-    types.setIndexWriter(w);
-    types.enableStored("body");
-    Document2 doc = new Document2(types);
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableStored("body");
+    Document2 doc = w.newDocument();
     try {
       doc.addLargeText("body", new StringReader("a small string"));
       fail("did not hit expected exception");
     } catch (IllegalStateException ise) {
-      // expected
+      // Expected
+      assertEquals("wrong exception message: " + ise.getMessage(), "field \"body\": can only store String large text fields", ise.getMessage());
     }
     doc.addLargeText("body", "a string");
     w.addDocument(doc);
@@ -416,17 +372,16 @@ public class TestDocument2 extends Lucen
   // Cannot store TokenStream:
   public void testExcStorePreTokenizedFields() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
-    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
-    types.setIndexWriter(w);
-    types.enableStored("body");
-    Document2 doc = new Document2(types);
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.enableStored("body");
+    Document2 doc = w.newDocument();
     try {
       doc.addLargeText("body", new CannedTokenStream());
       fail("did not hit expected exception");
     } catch (IllegalStateException ise) {
-      // expected
+      // Expected
+      assertEquals("wrong exception message: " + ise.getMessage(), "field \"body\": can only store String large text fields", ise.getMessage());
     }
     doc.addLargeText("body", "a string");
     w.addDocument(doc);
@@ -436,25 +391,23 @@ public class TestDocument2 extends Lucen
 
   public void testSortable() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
-    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
-    types.setIndexWriter(w);
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
 
     // Normally sorting is not enabled for atom fields:
-    types.enableSorting("id", true);
-    types.enableStored("id");
+    fieldTypes.enableSorting("id", true);
+    fieldTypes.enableStored("id");
 
-    Document2 doc = new Document2(types);
+    Document2 doc = w.newDocument();
 
     doc.addAtom("id", "two");
     w.addDocument(doc);
-    doc = new Document2(types);
+    doc = w.newDocument();
     doc.addAtom("id", "one");
     w.addDocument(doc);
     IndexReader r = DirectoryReader.open(w, true);
     IndexSearcher s = newSearcher(r);
-    TopDocs hits = s.search(new MatchAllDocsQuery(), 2, types.newSort("id"));
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 2, fieldTypes.newSort("id"));
     assertEquals(2, hits.scoreDocs.length);
     assertEquals("two", r.document(hits.scoreDocs[0].doc).get("id"));
     assertEquals("one", r.document(hits.scoreDocs[1].doc).get("id"));
@@ -465,22 +418,20 @@ public class TestDocument2 extends Lucen
 
   public void testMultiValuedNumeric() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
-    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
-    types.setIndexWriter(w);
-
-    types.setMultiValued("numbers");
-    types.enableSorting("numbers");
-    types.enableStored("id");
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+
+    fieldTypes.setMultiValued("numbers");
+    fieldTypes.enableSorting("numbers");
+    fieldTypes.enableStored("id");
 
-    Document2 doc = new Document2(types);
+    Document2 doc = w.newDocument();
     doc.addInt("numbers", 1);
     doc.addInt("numbers", 2);
     doc.addAtom("id", "one");
     w.addDocument(doc);
 
-    doc = new Document2(types);
+    doc = w.newDocument();
     doc.addInt("numbers", -10);
     doc.addInt("numbers", -20);
     doc.addAtom("id", "two");
@@ -488,7 +439,7 @@ public class TestDocument2 extends Lucen
 
     IndexReader r = DirectoryReader.open(w, true);
     IndexSearcher s = newSearcher(r);
-    TopDocs hits = s.search(new MatchAllDocsQuery(), 2, types.newSort("numbers"));
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 2, fieldTypes.newSort("numbers"));
     assertEquals(2, hits.scoreDocs.length);
     assertEquals("two", r.document(hits.scoreDocs[0].doc).get("id"));
     assertEquals("one", r.document(hits.scoreDocs[1].doc).get("id"));
@@ -499,22 +450,20 @@ public class TestDocument2 extends Lucen
 
   public void testMultiValuedString() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
-    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
-    types.setIndexWriter(w);
-
-    types.setMultiValued("strings");
-    types.enableSorting("strings");
-    types.enableStored("id");
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+
+    fieldTypes.setMultiValued("strings");
+    fieldTypes.enableSorting("strings");
+    fieldTypes.enableStored("id");
 
-    Document2 doc = new Document2(types);
+    Document2 doc = w.newDocument();
     doc.addAtom("strings", "abc");
     doc.addAtom("strings", "baz");
     doc.addAtom("id", "one");
     w.addDocument(doc);
 
-    doc = new Document2(types);
+    doc = w.newDocument();
     doc.addAtom("strings", "aaa");
     doc.addAtom("strings", "bbb");
     doc.addAtom("id", "two");
@@ -522,7 +471,7 @@ public class TestDocument2 extends Lucen
 
     IndexReader r = DirectoryReader.open(w, true);
     IndexSearcher s = newSearcher(r);
-    TopDocs hits = s.search(new MatchAllDocsQuery(), 2, types.newSort("strings"));
+    TopDocs hits = s.search(new MatchAllDocsQuery(), 2, fieldTypes.newSort("strings"));
     assertEquals(2, hits.scoreDocs.length);
     assertEquals("two", r.document(hits.scoreDocs[0].doc).get("id"));
     assertEquals("one", r.document(hits.scoreDocs[1].doc).get("id"));
@@ -534,20 +483,19 @@ public class TestDocument2 extends Lucen
   // You cannot have multi-valued DocValuesType.BINARY
   public void testExcMultiValuedDVBinary() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
-    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
-    types.setIndexWriter(w);
-    types.setDocValuesType("binary", DocValuesType.BINARY);
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setDocValuesType("binary", DocValuesType.BINARY);
     try {
-      types.setMultiValued("binary");
+      fieldTypes.setMultiValued("binary");
       fail("did not hit expected exception");
     } catch (IllegalStateException ise) {
-      // expected
+      // Expected
+      assertEquals("wrong exception message: " + ise.getMessage(), "field \"binary\": DocValuesType=BINARY cannot be multi-valued", ise.getMessage());
     }
-    assertFalse(types.getMultiValued("binary"));
-    Document2 doc = new Document2(types);
-    doc.addStored("binary", new byte[7]);
+    assertFalse(fieldTypes.getMultiValued("binary"));
+    Document2 doc = w.newDocument();
+    doc.addStored("binary", new BytesRef(new byte[7]));
     w.addDocument(doc);
     w.close();
     dir.close();
@@ -556,20 +504,19 @@ public class TestDocument2 extends Lucen
   // You cannot have multi-valued DocValuesType.SORTED
   public void testExcMultiValuedDVSorted() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
-    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
-    types.setIndexWriter(w);
-    types.setDocValuesType("sorted", DocValuesType.SORTED);
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setDocValuesType("sorted", DocValuesType.SORTED);
     try {
-      types.setMultiValued("sorted");
+      fieldTypes.setMultiValued("sorted");
       fail("did not hit expected exception");
     } catch (IllegalStateException ise) {
-      // expected
+      // Expected
+      assertEquals("wrong exception message: " + ise.getMessage(), "field \"sorted\": DocValuesType=SORTED cannot be multi-valued", ise.getMessage());
     }
-    assertFalse(types.getMultiValued("sorted"));
-    Document2 doc = new Document2(types);
-    doc.addStored("binary", new byte[7]);
+    assertFalse(fieldTypes.getMultiValued("sorted"));
+    Document2 doc = w.newDocument();
+    doc.addStored("binary", new BytesRef(new byte[7]));
     w.addDocument(doc);
     w.close();
     dir.close();
@@ -578,19 +525,18 @@ public class TestDocument2 extends Lucen
   // You cannot have multi-valued DocValuesType.NUMERIC
   public void testExcMultiValuedDVNumeric() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
-    IndexWriter w = new IndexWriter(dir, types.getDefaultIndexWriterConfig());
-    types.setIndexWriter(w);
-    types.setDocValuesType("numeric", DocValuesType.NUMERIC);
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setDocValuesType("numeric", DocValuesType.NUMERIC);
     try {
-      types.setMultiValued("numeric");
+      fieldTypes.setMultiValued("numeric");
       fail("did not hit expected exception");
     } catch (IllegalStateException ise) {
-      // expected
+      // Expected
+      assertEquals("wrong exception message: " + ise.getMessage(), "field \"numeric\": DocValuesType=NUMERIC cannot be multi-valued", ise.getMessage());
     }
-    assertFalse(types.getMultiValued("numeric"));
-    Document2 doc = new Document2(types);
+    assertFalse(fieldTypes.getMultiValued("numeric"));
+    Document2 doc = w.newDocument();
     doc.addInt("numeric", 17);
     w.addDocument(doc);
     w.close();
@@ -599,29 +545,27 @@ public class TestDocument2 extends Lucen
 
   public void testPostingsFormat() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
-    IndexWriterConfig iwc = types.getDefaultIndexWriterConfig();
+    IndexWriterConfig iwc = newIndexWriterConfig();
     IndexWriter w = new IndexWriter(dir, iwc);
-    types.setIndexWriter(w);
 
-    types.setPostingsFormat("id", "Memory");
-    types.enableStored("id");
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setPostingsFormat("id", "Memory");
+    fieldTypes.enableStored("id");
 
-    Document2 doc = new Document2(types);
+    Document2 doc = w.newDocument();
     doc.addAtom("id", "0");
     w.addDocument(doc);
 
-    doc = new Document2(types);
+    doc = w.newDocument();
     doc.addAtom("id", "1");
     w.addDocument(doc);
 
     IndexReader r = DirectoryReader.open(w, true);
     IndexSearcher s = newSearcher(r);
-    TopDocs hits = s.search(types.newTermQuery("id", "0"), 1);
+    TopDocs hits = s.search(fieldTypes.newTermQuery("id", "0"), 1);
     assertEquals(1, hits.scoreDocs.length);
     assertEquals("0", r.document(hits.scoreDocs[0].doc).get("id"));
-    hits = s.search(types.newTermQuery("id", "1"), 1);
+    hits = s.search(fieldTypes.newTermQuery("id", "1"), 1);
     assertEquals(1, hits.scoreDocs.length);
     assertEquals("1", r.document(hits.scoreDocs[0].doc).get("id"));
     r.close();
@@ -631,19 +575,17 @@ public class TestDocument2 extends Lucen
 
   public void testLongTermQuery() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
-    IndexWriterConfig iwc = types.getDefaultIndexWriterConfig();
+    IndexWriterConfig iwc = newIndexWriterConfig();
     IndexWriter w = new IndexWriter(dir, iwc);
-    types.setIndexWriter(w);
+    FieldTypes fieldTypes = w.getFieldTypes();
 
-    Document2 doc = new Document2(types);
+    Document2 doc = w.newDocument();
     doc.addLong("id", 1L);
     w.addDocument(doc);
 
     IndexReader r = DirectoryReader.open(w, true);
     IndexSearcher s = newSearcher(r);
-    TopDocs hits = s.search(types.newTermQuery("id", 1L), 1);
+    TopDocs hits = s.search(fieldTypes.newTermQuery("id", 1L), 1);
     assertEquals(1, hits.scoreDocs.length);
     r.close();
     w.close();
@@ -652,19 +594,17 @@ public class TestDocument2 extends Lucen
 
   public void testIntTermQuery() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
-    IndexWriterConfig iwc = types.getDefaultIndexWriterConfig();
+    IndexWriterConfig iwc = newIndexWriterConfig();
     IndexWriter w = new IndexWriter(dir, iwc);
-    types.setIndexWriter(w);
+    FieldTypes fieldTypes = w.getFieldTypes();
 
-    Document2 doc = new Document2(types);
+    Document2 doc = w.newDocument();
     doc.addInt("id", 1);
     w.addDocument(doc);
 
     IndexReader r = DirectoryReader.open(w, true);
     IndexSearcher s = newSearcher(r);
-    TopDocs hits = s.search(types.newTermQuery("id", 1), 1);
+    TopDocs hits = s.search(fieldTypes.newTermQuery("id", 1), 1);
     assertEquals(1, hits.scoreDocs.length);
     r.close();
     w.close();
@@ -673,14 +613,12 @@ public class TestDocument2 extends Lucen
 
   public void testNumericPrecisionStep() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
-    IndexWriterConfig iwc = types.getDefaultIndexWriterConfig();
+    IndexWriterConfig iwc = newIndexWriterConfig();
     IndexWriter w = new IndexWriter(dir, iwc);
-    types.setIndexWriter(w);
-    types.setNumericPrecisionStep("long", 4);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setNumericPrecisionStep("long", 4);
 
-    Document2 doc = new Document2(types);
+    Document2 doc = w.newDocument();
     doc.addLong("long", 17);
     w.addDocument(doc);
 
@@ -693,20 +631,18 @@ public class TestDocument2 extends Lucen
 
   public void testBinaryTermQuery() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
-    IndexWriterConfig iwc = types.getDefaultIndexWriterConfig();
+    IndexWriterConfig iwc = newIndexWriterConfig();
     IndexWriter w = new IndexWriter(dir, iwc);
-    types.setIndexWriter(w);
-    types.setIndexOptions("id", IndexOptions.DOCS_ONLY);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setIndexOptions("id", IndexOptions.DOCS_ONLY);
 
-    Document2 doc = new Document2(types);
-    doc.addStored("id", new byte[1]);
+    Document2 doc = w.newDocument();
+    doc.addStored("id", new BytesRef(new byte[1]));
     w.addDocument(doc);
 
     IndexReader r = DirectoryReader.open(w, true);
     IndexSearcher s = newSearcher(r);
-    TopDocs hits = s.search(types.newTermQuery("id", new byte[1]), 1);
+    TopDocs hits = s.search(fieldTypes.newTermQuery("id", new byte[1]), 1);
     assertEquals(1, hits.scoreDocs.length);
     r.close();
     w.close();
@@ -715,30 +651,28 @@ public class TestDocument2 extends Lucen
 
   public void testDocValuesFormat() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
-    IndexWriterConfig iwc = types.getDefaultIndexWriterConfig();
+    IndexWriterConfig iwc = newIndexWriterConfig();
     IndexWriter w = new IndexWriter(dir, iwc);
-    types.setIndexWriter(w);
+    FieldTypes fieldTypes = w.getFieldTypes();
 
-    types.setDocValuesFormat("id", "Memory");
-    types.enableStored("id");
-    types.enableSorting("id");
+    fieldTypes.setDocValuesFormat("id", "Memory");
+    fieldTypes.enableStored("id");
+    fieldTypes.enableSorting("id");
 
-    Document2 doc = new Document2(types);
+    Document2 doc = w.newDocument();
     doc.addAtom("id", "1");
     w.addDocument(doc);
 
-    doc = new Document2(types);
+    doc = w.newDocument();
     doc.addAtom("id", "0");
     w.addDocument(doc);
 
     IndexReader r = DirectoryReader.open(w, true);
     IndexSearcher s = newSearcher(r);
-    TopDocs hits = s.search(types.newTermQuery("id", "0"), 1, types.newSort("id"));
+    TopDocs hits = s.search(fieldTypes.newTermQuery("id", "0"), 1, fieldTypes.newSort("id"));
     assertEquals(1, hits.scoreDocs.length);
     assertEquals("0", r.document(hits.scoreDocs[0].doc).get("id"));
-    hits = s.search(types.newTermQuery("id", "1"), 1);
+    hits = s.search(fieldTypes.newTermQuery("id", "1"), 1);
     assertEquals(1, hits.scoreDocs.length);
     assertEquals("1", r.document(hits.scoreDocs[0].doc).get("id"));
     r.close();
@@ -748,24 +682,23 @@ public class TestDocument2 extends Lucen
 
   public void testTermsDictTermsPerBlock() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
-    IndexWriterConfig iwc = types.getDefaultIndexWriterConfig();
+    IndexWriterConfig iwc = newIndexWriterConfig();
     IndexWriter w = new IndexWriter(dir, iwc);
-    types.setIndexWriter(w);
-    types.setIndexOptions("id", IndexOptions.DOCS_ONLY);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setIndexOptions("id", IndexOptions.DOCS_ONLY);
 
-    types.setTermsDictBlockSize("id", 10);
+    fieldTypes.setTermsDictBlockSize("id", 10);
     for(int i=0;i<10;i++) {
-      Document2 doc = new Document2(types);
+      Document2 doc = w.newDocument();
       doc.addAtom("id", "0" + i);
       w.addDocument(doc);
     }
     for(int i=0;i<10;i++) {
-      Document2 doc = new Document2(types);
+      Document2 doc = w.newDocument();
       doc.addAtom("id", "1" + i);
       w.addDocument(doc);
     }
+    w.forceMerge(1);
     w.close();
 
     // Use CheckIndex to verify we got 2 terms blocks:
@@ -784,36 +717,53 @@ public class TestDocument2 extends Lucen
 
   public void testExcInvalidDocValuesFormat() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
-    IndexWriterConfig iwc = types.getDefaultIndexWriterConfig();
+    IndexWriterConfig iwc = newIndexWriterConfig();
     IndexWriter w = new IndexWriter(dir, iwc);
-    types.setIndexWriter(w);
+    FieldTypes fieldTypes = w.getFieldTypes();
     try {
-      types.setDocValuesFormat("id", "foobar");
+      fieldTypes.setDocValuesFormat("id", "foobar");
       fail("did not hit exception");
     } catch (IllegalArgumentException iae) {
-      // expected
+      // Expected
+      assertTrue("wrong exception message: " + iae.getMessage(), iae.getMessage().startsWith("field \"id\": A SPI class of type org.apache.lucene.codecs.DocValuesFormat with name 'foobar' does not exist"));
     }
-    types.setDocValuesFormat("id", "Memory");
+    fieldTypes.setDocValuesFormat("id", "Memory");
+    w.close();
+    dir.close();
+  }
+
+  public void testExcInvalidDocValuesType() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig();
+    IndexWriter w = new IndexWriter(dir, iwc);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setDocValuesType("id", DocValuesType.BINARY);
+    Document2 doc = w.newDocument();
+    try {
+      doc.addInt("id", 17);
+      fail("did not hit exception");
+    } catch (IllegalStateException ise) {
+      // Expected
+      assertEquals("wrong exception message: " + ise.getMessage(), "field \"id\": type INT must use NUMERIC docValuesType (got: BINARY)", ise.getMessage());
+    }
+    fieldTypes.setPostingsFormat("id", "Memory");
     w.close();
     dir.close();
   }
 
   public void testExcInvalidPostingsFormat() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
-    IndexWriterConfig iwc = types.getDefaultIndexWriterConfig();
+    IndexWriterConfig iwc = newIndexWriterConfig();
     IndexWriter w = new IndexWriter(dir, iwc);
-    types.setIndexWriter(w);
+    FieldTypes fieldTypes = w.getFieldTypes();
     try {
-      types.setPostingsFormat("id", "foobar");
+      fieldTypes.setPostingsFormat("id", "foobar");
       fail("did not hit exception");
     } catch (IllegalArgumentException iae) {
-      // expected
+      // Expected
+      assertTrue("wrong exception message: " + iae.getMessage(), iae.getMessage().startsWith("field \"id\": A SPI class of type org.apache.lucene.codecs.PostingsFormat with name 'foobar' does not exist"));
     }
-    types.setPostingsFormat("id", "Memory");
+    fieldTypes.setPostingsFormat("id", "Memory");
     w.close();
     dir.close();
   }
@@ -821,29 +771,25 @@ public class TestDocument2 extends Lucen
   /** Make sure that if we index an ATOM field, at search time we get KeywordAnalyzer for it. */
   public void testAtomFieldUsesKeywordAnalyzer() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
-    IndexWriterConfig iwc = types.getDefaultIndexWriterConfig();
+    IndexWriterConfig iwc = newIndexWriterConfig();
     IndexWriter w = new IndexWriter(dir, iwc);
-    types.setIndexWriter(w);
-    Document2 doc = new Document2(types);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    Document2 doc = w.newDocument();
     doc.addAtom("id", "foo bar");
     w.addDocument(doc);
-    BaseTokenStreamTestCase.assertTokenStreamContents(types.getQueryAnalyzer().tokenStream("id", "foo bar"), new String[] {"foo bar"}, new int[1], new int[] {7});
+    BaseTokenStreamTestCase.assertTokenStreamContents(fieldTypes.getQueryAnalyzer().tokenStream("id", "foo bar"), new String[] {"foo bar"}, new int[1], new int[] {7});
     w.close();
     dir.close();
   }
 
   public void testHighlight() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
-    IndexWriterConfig iwc = types.getDefaultIndexWriterConfig();
+    IndexWriterConfig iwc = newIndexWriterConfig();
     IndexWriter w = new IndexWriter(dir, iwc);
-    types.setIndexWriter(w);
-    types.disableHighlighting("no_highlight");
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableHighlighting("no_highlight");
 
-    Document2 doc = new Document2(types);
+    Document2 doc = w.newDocument();
     doc.addLargeText("highlight", "here is some content");
     doc.addLargeText("no_highlight", "here is some content");
     w.addDocument(doc);
@@ -859,22 +805,20 @@ public class TestDocument2 extends Lucen
 
   public void testAnalyzerPositionGap() throws Exception {
     Directory dir = newDirectory();
-    Analyzer a = new MockAnalyzer(random());
-    FieldTypes types = new FieldTypes(a);
-    IndexWriterConfig iwc = types.getDefaultIndexWriterConfig();
-    IndexWriter w = new IndexWriter(dir, iwc);
-    types.setIndexWriter(w);
-    types.setIndexOptions("nogap", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
-    types.setMultiValued("nogap");
-    types.disableHighlighting("nogap");
-    types.setAnalyzerPositionGap("nogap", 0);
-
-    types.setIndexOptions("onegap", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
-    types.setMultiValued("onegap");
-    types.disableHighlighting("onegap");
-    types.setAnalyzerPositionGap("onegap", 1);
+    IndexWriterConfig iwc = newIndexWriterConfig();
+    IndexWriter w = new IndexWriter(dir, iwc);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setIndexOptions("nogap", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+    fieldTypes.setMultiValued("nogap");
+    fieldTypes.disableHighlighting("nogap");
+    fieldTypes.setAnalyzerPositionGap("nogap", 0);
+
+    fieldTypes.setIndexOptions("onegap", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+    fieldTypes.setMultiValued("onegap");
+    fieldTypes.disableHighlighting("onegap");
+    fieldTypes.setAnalyzerPositionGap("onegap", 1);
 
-    Document2 doc = new Document2(types);
+    Document2 doc = w.newDocument();
     doc.addLargeText("nogap", "word1");
     doc.addLargeText("nogap", "word2");
     doc.addLargeText("onegap", "word1");
@@ -898,6 +842,105 @@ public class TestDocument2 extends Lucen
     dir.close();
   }
 
+  public void testFieldTypesAreSaved() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    Document2 doc = w.newDocument();
+    doc.addAtom("id", new BytesRef(new byte[5]));
+    w.addDocument(doc);
+    w.close();
+
+    w = new IndexWriter(dir, newIndexWriterConfig());
+    doc = w.newDocument();
+    try {
+      doc.addInt("id", 7);
+      fail("did not hit exception");
+    } catch (IllegalStateException ise) {
+      // Expected
+      assertEquals("wrong exception message: " + ise.getMessage(), "field \"id\": cannot change from value type ATOM to INT", ise.getMessage());
+    }
+    doc.addAtom("id", new BytesRef(new byte[7]));
+    w.addDocument(doc);
+    w.close();
+    dir.close();
+  }
+
+  public void testDisableIndexing() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig();
+    IndexWriter w = new IndexWriter(dir, iwc);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setIndexOptions("foo", null);
+
+    Document2 doc = w.newDocument();
+    doc.addAtom("foo", "bar");
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    try {
+      fieldTypes.newTermQuery("foo", "bar");
+      fail("did not hit exception");
+    } catch (IllegalStateException ise) {
+      // Expected
+      assertEquals("wrong exception message: " + ise.getMessage(), "field \"foo\": cannot create term query: this field was not indexed", ise.getMessage());
+    }
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testExcDisableDocValues() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig();
+    IndexWriter w = new IndexWriter(dir, iwc);
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.setDocValuesType("foo", null);
+
+    Document2 doc = w.newDocument();
+    doc.addInt("foo", 17);
+    w.addDocument(doc);
+
+    IndexReader r = DirectoryReader.open(w, true);
+    try {
+      fieldTypes.newSort("foo");
+      fail("did not hit exception");
+    } catch (IllegalStateException ise) {
+      // Expected
+      assertEquals("wrong exception message: " + ise.getMessage(), "field \"foo\": this field was not indexed for sorting", ise.getMessage());
+    }
+    r.close();
+    w.close();
+    dir.close();
+  }
+
+  public void testExcRangeQuery() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableFastRanges("int");
+    Document2 doc = w.newDocument();
+    doc.addInt("int", 17);
+    w.addDocument(doc);
+    try {
+      fieldTypes.newRangeQuery("int", 0, true, 7, true);
+      fail("did not hit exception");
+    } catch (IllegalStateException ise) {
+      assertEquals("field \"int\": this field was not indexed for fast ranges", ise.getMessage());
+    }
+    w.close();
+    dir.close();
+  }
+
+  public void testIndexCreatedVersion() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+    assertEquals(Version.LATEST, w.getFieldTypes().getIndexCreatedVersion());
+    w.close();
+    dir.close();
+  }
+
   // nocommit test per-field analyzers
 
   // nocommit test per-field sims
@@ -905,6 +948,4 @@ public class TestDocument2 extends Lucen
   // nocommit test for pre-analyzed
 
   // nocommit test multi-valued
-
-  // nocommit test serialize
 }

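The TestDocument2 hunks above all follow the same migration: instead of constructing FieldTypes
and Document2 directly and wiring them to the writer with setIndexWriter(), the tests now obtain
both from the IndexWriter itself via getFieldTypes() and newDocument(). As an illustrative sketch
(not code from this commit), the pattern looks roughly like the following; the class name, the
analyzer and directory choices, and the import locations (notably org.apache.lucene.index.IndexOptions
on this branch) are assumptions made only for the example.

    // Illustrative sketch only; not part of r1634820. Mirrors the writer-owned
    // FieldTypes / Document2 usage shown in the hunks above.
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.Document2;
    import org.apache.lucene.document.FieldTypes;
    import org.apache.lucene.index.IndexOptions;   // assumed package on the lucene6005 branch
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class FieldTypesUsageSketch {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));

        // Per-field settings hang off the writer-owned FieldTypes rather than per-Field flags:
        FieldTypes fieldTypes = w.getFieldTypes();
        fieldTypes.setIndexOptions("id", IndexOptions.DOCS_ONLY);
        fieldTypes.setTermsDictBlockSize("id", 10);

        // Documents come from the writer so they share its FieldTypes; the schema is
        // checked as fields are added (e.g. addInt on an ATOM field throws):
        Document2 doc = w.newDocument();
        doc.addAtom("id", "0");
        doc.addLargeText("content", "some indexed text");
        w.addDocument(doc);

        w.close();
        dir.close();
      }
    }

The writer-owned schema is what tests such as testFieldTypesAreSaved exercise: the field types are
persisted with the index, so a second IndexWriter opened on the same directory rejects a
conflicting value type for an existing field.
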
Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java?rev=1634820&r1=1634819&r2=1634820&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java Tue Oct 28 09:04:05 2014
@@ -163,15 +163,15 @@ public class TestAddIndexes extends Luce
     Directory aux = newDirectory();
 
     setUpDirs(dir, aux);
-    IndexWriter writer = newWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND));
+    IndexWriter writer = newWriter(dir, newIndexWriterConfig().setOpenMode(OpenMode.APPEND));
     writer.addIndexes(aux);
 
     // Adds 10 docs, then replaces them with another 10
     // docs, so 10 pending deletes:
     for (int i = 0; i < 20; i++) {
-      Document doc = new Document();
-      doc.add(newStringField("id", "" + (i % 10), Field.Store.NO));
-      doc.add(newTextField("content", "bbb " + i, Field.Store.NO));
+      Document2 doc = writer.newDocument();
+      doc.addAtom("id", "" + (i % 10));
+      doc.addLargeText("content", "bbb " + i);
       writer.updateDocument(new Term("id", "" + (i%10)), doc);
     }
     // Deletes one of the 10 added docs, leaving 9:
@@ -204,9 +204,9 @@ public class TestAddIndexes extends Luce
     // Adds 10 docs, then replaces them with another 10
     // docs, so 10 pending deletes:
     for (int i = 0; i < 20; i++) {
-      Document doc = new Document();
-      doc.add(newStringField("id", "" + (i % 10), Field.Store.NO));
-      doc.add(newTextField("content", "bbb " + i, Field.Store.NO));
+      Document2 doc = writer.newDocument();
+      doc.addAtom("id", "" + (i % 10));
+      doc.addLargeText("content", "bbb " + i);
       writer.updateDocument(new Term("id", "" + (i%10)), doc);
     }
     
@@ -242,9 +242,9 @@ public class TestAddIndexes extends Luce
     // Adds 10 docs, then replaces them with another 10
     // docs, so 10 pending deletes:
     for (int i = 0; i < 20; i++) {
-      Document doc = new Document();
-      doc.add(newStringField("id", "" + (i % 10), Field.Store.NO));
-      doc.add(newTextField("content", "bbb " + i, Field.Store.NO));
+      Document2 doc = writer.newDocument();
+      doc.addAtom("id", "" + (i % 10));
+      doc.addLargeText("content", "bbb " + i);
       writer.updateDocument(new Term("id", "" + (i%10)), doc);
     }
 
@@ -517,16 +517,16 @@ public class TestAddIndexes extends Luce
 
   private void addDocs(IndexWriter writer, int numDocs) throws IOException {
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
-      doc.add(newTextField("content", "aaa", Field.Store.NO));
+      Document2 doc = writer.newDocument();
+      doc.addLargeText("content", "aaa");
       writer.addDocument(doc);
     }
   }
 
   private void addDocs2(IndexWriter writer, int numDocs) throws IOException {
     for (int i = 0; i < numDocs; i++) {
-      Document doc = new Document();
-      doc.add(newTextField("content", "bbb", Field.Store.NO));
+      Document2 doc = writer.newDocument();
+      doc.addLargeText("content", "bbb");
       writer.addDocument(doc);
     }
   }
@@ -602,21 +602,20 @@ public class TestAddIndexes extends Luce
     LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy();
     lmp.setNoCFSRatio(0.0);
     lmp.setMergeFactor(100);
-    FieldTypes types = new FieldTypes(new MockAnalyzer(random()));
-    IndexWriter writer = new IndexWriter(dir, types.getDefaultIndexWriterConfig()
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()
         .setMaxBufferedDocs(5).setMergePolicy(lmp));
-    types.setIndexWriter(writer);
 
-    Document2 doc = new Document2(types);
-    types.enableTermVectors("content");
-    types.enableTermVectorPositions("content");
-    types.enableTermVectorOffsets("content");
+    FieldTypes fieldTypes = writer.getFieldTypes();
+    Document2 doc = writer.newDocument();
+    fieldTypes.enableTermVectors("content");
+    fieldTypes.enableTermVectorPositions("content");
+    fieldTypes.enableTermVectorOffsets("content");
     doc.addLargeText("content", "aaa bbb ccc ddd eee fff ggg hhh iii");
     for(int i=0;i<60;i++) {
       writer.addDocument(doc);
     }
 
-    doc = new Document2(types);
+    doc = writer.newDocument();
     doc.addStored("content", "aaa bbb ccc ddd eee fff ggg hhh iii");
     doc.addStored("content", "aaa bbb ccc ddd eee fff ggg hhh iii");
     doc.addStored("content", "aaa bbb ccc ddd eee fff ggg hhh iii");
@@ -641,11 +640,10 @@ public class TestAddIndexes extends Luce
 
   // TODO: these are also in TestIndexWriter... add a simple doc-writing method
   // like this to LuceneTestCase?
-  private void addDoc(IndexWriter writer) throws IOException
-  {
-      Document doc = new Document();
-      doc.add(newTextField("content", "aaa", Field.Store.NO));
-      writer.addDocument(doc);
+  private void addDoc(IndexWriter writer) throws IOException {
+    Document2 doc = writer.newDocument();
+    doc.addLargeText("content", "aaa");
+    writer.addDocument(doc);
   }
   
   private abstract class RunAddIndexesThreads {