Posted to commits@lucene.apache.org by rm...@apache.org on 2011/08/16 00:03:52 UTC

svn commit: r1158029 [8/15] - in /lucene/dev/branches/fieldtype_conflicted: lucene/ lucene/contrib/ lucene/contrib/demo/src/java/org/apache/lucene/demo/ lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/ lucene/contrib/highlighter/...
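
For readers skimming the patch: the bulk of this change replaces the old
Field.Store / Field.Index / Field.TermVector enum arguments with the new
FieldType-based API (TextField, StringField, BinaryField). A minimal sketch of
the recurring pattern, with an illustrative field name and value (imports from
org.apache.lucene.document assumed):

    // Old API: indexing options passed as enums to the Field constructor.
    Document doc = new Document();
    doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));

    // New API on this branch: options live on a FieldType, usually derived
    // from a default such as TextField.TYPE_UNSTORED; note the
    // (name, type, value) argument order used throughout the hunks below.
    FieldType storedText = new FieldType(TextField.TYPE_UNSTORED);
    storedText.setStored(true);
    doc.add(new Field("content", storedText, "aaa"));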

Modified: lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java Mon Aug 15 22:03:41 2011
@@ -42,12 +42,11 @@ import org.apache.lucene.analysis.TokenS
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.document.BinaryField;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.Field.TermVector;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.FieldCache;
@@ -78,6 +77,7 @@ import org.apache.lucene.index.codecs.pr
 
 public class TestIndexWriter extends LuceneTestCase {
 
+    private static final FieldType storedTextType = new FieldType(TextField.TYPE_UNSTORED);
     public void testDocCount() throws IOException {
         Directory dir = newDirectory();
 
@@ -138,15 +138,15 @@ public class TestIndexWriter extends Luc
     static void addDoc(IndexWriter writer) throws IOException
     {
         Document doc = new Document();
-        doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
+        doc.add(newField("content", "aaa", TextField.TYPE_UNSTORED));
         writer.addDocument(doc);
     }
 
     static void addDocWithIndex(IndexWriter writer, int index) throws IOException
     {
         Document doc = new Document();
-        doc.add(newField("content", "aaa " + index, Field.Store.YES, Field.Index.ANALYZED));
-        doc.add(newField("id", "" + index, Field.Store.YES, Field.Index.ANALYZED));
+        doc.add(newField("content", "aaa " + index, storedTextType));
+        doc.add(newField("id", "" + index, storedTextType));
         writer.addDocument(doc);
     }
 
@@ -256,12 +256,12 @@ public class TestIndexWriter extends Luc
       IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(10));
       for(int j=0;j<100;j++) {
         Document doc = new Document();
-        doc.add(newField("a"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
-        doc.add(newField("b"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
-        doc.add(newField("c"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
-        doc.add(newField("d"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
-        doc.add(newField("e"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
-        doc.add(newField("f"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
+        doc.add(newField("a"+j, "aaa" + j, storedTextType));
+        doc.add(newField("b"+j, "aaa" + j, storedTextType));
+        doc.add(newField("c"+j, "aaa" + j, storedTextType));
+        doc.add(newField("d"+j, "aaa", storedTextType));
+        doc.add(newField("e"+j, "aaa", storedTextType));
+        doc.add(newField("f"+j, "aaa", storedTextType));
         writer.addDocument(doc);
       }
       writer.close();
@@ -292,7 +292,7 @@ public class TestIndexWriter extends Luc
       int lastNumFile = dir.listAll().length;
       for(int j=0;j<9;j++) {
         Document doc = new Document();
-        doc.add(newField("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
+        doc.add(newField("field", "aaa" + j, storedTextType));
         writer.addDocument(doc);
         int numFile = dir.listAll().length;
         // Verify that with a tiny RAM buffer we see new
@@ -315,7 +315,7 @@ public class TestIndexWriter extends Luc
       int lastFlushCount = -1;
       for(int j=1;j<52;j++) {
         Document doc = new Document();
-        doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
+        doc.add(new Field("field", storedTextType, "aaa" + j));
         writer.addDocument(doc);
         _TestUtil.syncConcurrentMerges(writer);
         int flushCount = writer.getFlushCount();
@@ -369,7 +369,7 @@ public class TestIndexWriter extends Luc
 
       for(int j=1;j<52;j++) {
         Document doc = new Document();
-        doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
+        doc.add(new Field("field", storedTextType, "aaa" + j));
         writer.addDocument(doc);
       }
       
@@ -430,7 +430,7 @@ public class TestIndexWriter extends Luc
         for(int j=0;j<100;j++) {
           Document doc = new Document();
           for(int k=0;k<100;k++) {
-            doc.add(newField("field", Integer.toString(random.nextInt()), Field.Store.YES, Field.Index.ANALYZED));
+            doc.add(newField("field", Integer.toString(random.nextInt()), storedTextType));
           }
           writer.addDocument(doc);
         }
@@ -439,7 +439,7 @@ public class TestIndexWriter extends Luc
         // occurs (heavy on byte blocks)
         for(int j=0;j<100;j++) {
           Document doc = new Document();
-          doc.add(newField("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED));
+          doc.add(newField("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", storedTextType));
           writer.addDocument(doc);
         }
 
@@ -454,7 +454,7 @@ public class TestIndexWriter extends Luc
           String longTerm = b.toString();
 
           Document doc = new Document();
-          doc.add(newField("field", longTerm, Field.Store.YES, Field.Index.ANALYZED));
+          doc.add(newField("field", longTerm, storedTextType));
           writer.addDocument(doc);
         }
       }
@@ -472,11 +472,17 @@ public class TestIndexWriter extends Luc
       MockDirectoryWrapper dir = newDirectory();
       IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(10));
       // Enable norms for only 1 doc, pre flush
+      FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+      customType.setStored(true);
+      customType.setOmitNorms(true);
       for(int j=0;j<10;j++) {
         Document doc = new Document();
-        Field f = newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
+        Field f = null;
         if (j != 8) {
-          f.setOmitNorms(true);
+          f = newField("field", "aaa", customType);
+        }
+        else {
+          f = newField("field", "aaa", storedTextType);
         }
         doc.add(f);
         writer.addDocument(doc);
@@ -495,9 +501,12 @@ public class TestIndexWriter extends Luc
       // Enable norms for only 1 doc, post flush
       for(int j=0;j<27;j++) {
         Document doc = new Document();
-        Field f = newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
+        Field f = null;
         if (j != 26) {
-          f.setOmitNorms(true);
+          f = newField("field", "aaa", customType);
+        }
+        else {
+          f = newField("field", "aaa", storedTextType);
         }
         doc.add(f);
         writer.addDocument(doc);
@@ -527,7 +536,12 @@ public class TestIndexWriter extends Luc
         b.append(" a a a a a a a a");
       }
       Document doc = new Document();
-      doc.add(newField("field", b.toString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+      customType.setStored(true);
+      customType.setStoreTermVectors(true);
+      customType.setStoreTermVectorPositions(true);
+      customType.setStoreTermVectorOffsets(true);
+      doc.add(newField("field", b.toString(), customType));
       writer.addDocument(doc);
       writer.close();
 
@@ -595,7 +609,12 @@ public class TestIndexWriter extends Luc
               setMergePolicy(newLogMergePolicy(10))
       );
       Document doc = new Document();
-      doc.add(newField("field", "aaa", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+      FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+      customType.setStored(true);
+      customType.setStoreTermVectors(true);
+      customType.setStoreTermVectorPositions(true);
+      customType.setStoreTermVectorOffsets(true);
+      doc.add(newField("field", "aaa", customType));
       for(int i=0;i<19;i++)
         writer.addDocument(doc);
       writer.flush(false, true);
@@ -615,7 +634,12 @@ public class TestIndexWriter extends Luc
       IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
       writer.setInfoStream(VERBOSE ? System.out : null);
       Document doc = new Document();
-      doc.add(newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+      customType.setStored(true);
+      customType.setStoreTermVectors(true);
+      customType.setStoreTermVectorPositions(true);
+      customType.setStoreTermVectorOffsets(true);
+      doc.add(newField("field", "aaa", customType));
       writer.addDocument(doc);
       writer.commit();
       if (VERBOSE) {
@@ -644,7 +668,9 @@ public class TestIndexWriter extends Luc
         TEST_VERSION_CURRENT, new MockAnalyzer(random)));
 
     Document document = new Document();
-    document.add(newField("tvtest", "", Store.NO, Index.ANALYZED, TermVector.YES));
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStoreTermVectors(true);
+    document.add(newField("tvtest", "", customType));
     iw.addDocument(document);
     iw.close();
     dir.close();
@@ -661,8 +687,9 @@ public class TestIndexWriter extends Luc
       ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
       IndexWriter iw = new IndexWriter(dir, conf);
       Document document = new Document();
-      document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
-                             Field.TermVector.YES));
+      FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+      customType.setStoreTermVectors(true);
+      document.add(newField("tvtest", "a b c", customType));
       Thread.currentThread().setPriority(Thread.MAX_PRIORITY);
       for(int i=0;i<4;i++)
         iw.addDocument(document);
@@ -688,24 +715,21 @@ public class TestIndexWriter extends Luc
       Document doc = new Document();
       String contents = "aa bb cc dd ee ff gg hh ii jj kk";
 
+      FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+      customType.setStored(true);
+      FieldType type = null;
       if (i == 7) {
         // Add empty docs here
-        doc.add(newField("content3", "", Field.Store.NO,
-                          Field.Index.ANALYZED));
+        doc.add(newField("content3", "", TextField.TYPE_UNSTORED));
       } else {
-        Field.Store storeVal;
         if (i%2 == 0) {
-          doc.add(newField("content4", contents, Field.Store.YES,
-                            Field.Index.ANALYZED));
-          storeVal = Field.Store.YES;
+          doc.add(newField("content4", contents, customType));
+          type = customType;
         } else
-          storeVal = Field.Store.NO;
-        doc.add(newField("content1", contents, storeVal,
-                          Field.Index.ANALYZED));
-        doc.add(newField("content3", "", Field.Store.YES,
-                          Field.Index.ANALYZED));
-        doc.add(newField("content5", "", storeVal,
-                          Field.Index.ANALYZED));
+          type = TextField.TYPE_UNSTORED; 
+        doc.add(newField("content1", contents, TextField.TYPE_UNSTORED));
+        doc.add(newField("content3", "", customType));
+        doc.add(newField("content5", "", type));
       }
 
       for(int j=0;j<4;j++)
@@ -731,7 +755,11 @@ public class TestIndexWriter extends Luc
     Directory directory = newDirectory();
 
     final Document doc = new Document();
-    Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStored(true);
+    customType.setTokenized(false);
+
+    Field idField = newField("id", "", customType);
     doc.add(idField);
 
     for(int pass=0;pass<2;pass++) {
@@ -835,7 +863,7 @@ public class TestIndexWriter extends Luc
     for(int i=0;i<10000;i++)
       b.append(" a");
     b.append(" x");
-    doc.add(newField("field", b.toString(), Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("field", b.toString(), TextField.TYPE_UNSTORED));
     writer.addDocument(doc);
     writer.close();
 
@@ -853,7 +881,7 @@ public class TestIndexWriter extends Luc
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
     Document doc = new Document();
-    doc.add(newField("", "a b c", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("", "a b c", TextField.TYPE_UNSTORED));
     writer.addDocument(doc);
     writer.close();
     dir.close();
@@ -887,8 +915,9 @@ public class TestIndexWriter extends Luc
     Directory dir = newDirectory();
     MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
     Document doc = new Document();
-    doc.add(newField("field", "a field", Field.Store.YES,
-                      Field.Index.ANALYZED));
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStored(true);
+    doc.add(newField("field", "a field", customType));
     w.addDocument(doc);
     w.commit();
     assertTrue(w.beforeWasCalled);
@@ -931,7 +960,7 @@ public class TestIndexWriter extends Luc
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
     Document doc = new Document();
-    doc.add(new Field("field", tokens));
+    doc.add(new TextField("field", tokens));
     w.addDocument(doc);
     w.commit();
 
@@ -972,20 +1001,20 @@ public class TestIndexWriter extends Luc
       b[i] = (byte) (i+77);
 
     Document doc = new Document();
-    Field f = new Field("binary", b, 10, 17);
-    byte[] bx = f.getBinaryValue();
+    Field f = new BinaryField("binary", b, 10, 17);
+    byte[] bx = f.binaryValue(null).bytes;
     assertTrue(bx != null);
     assertEquals(50, bx.length);
-    assertEquals(10, f.getBinaryOffset());
-    assertEquals(17, f.getBinaryLength());
+    assertEquals(10, f.binaryValue(null).offset);
+    assertEquals(17, f.binaryValue(null).length);
     doc.add(f);
     w.addDocument(doc);
     w.close();
 
     IndexReader ir = IndexReader.open(dir, true);
-    doc = ir.document(0);
-    f = doc.getField("binary");
-    b = f.getBinaryValue();
+    Document doc2 = ir.document(0);
+    IndexableField f2 = doc2.getField("binary");
+    b = f2.binaryValue(null).bytes;
     assertTrue(b != null);
     assertEquals(17, b.length, 17);
     assertEquals(87, b[0]);
@@ -1001,10 +1030,11 @@ public class TestIndexWriter extends Luc
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
         TEST_VERSION_CURRENT, analyzer));
     Document doc = new Document();
-    Field f = newField("field", "", Field.Store.NO,
-                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS);
-    Field f2 = newField("field", "crunch man", Field.Store.NO,
-        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS);
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStoreTermVectors(true);
+    customType.setStoreTermVectorPositions(true);
+    Field f = newField("field", "", customType);
+    Field f2 = newField("field", "crunch man", customType);
     doc.add(f);
     doc.add(f2);
     w.addDocument(doc);
@@ -1046,8 +1076,14 @@ public class TestIndexWriter extends Luc
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
     Document doc = new Document();
-    doc.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
-                      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStored(true);
+    customType.setStoreTermVectors(true);
+    customType.setStoreTermVectorPositions(true);
+    customType.setStoreTermVectorOffsets(true);
+    
+    doc.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType));
     writer.addDocument(doc);
     writer.addDocument(doc);
     writer.addDocument(doc);
@@ -1099,7 +1135,7 @@ public class TestIndexWriter extends Luc
             w = new IndexWriter(dir, conf);
 
             Document doc = new Document();
-            doc.add(newField("field", "some text contents", Field.Store.YES, Field.Index.ANALYZED));
+            doc.add(newField("field", "some text contents", storedTextType));
             for(int i=0;i<100;i++) {
               w.addDocument(doc);
               if (i%10 == 0) {
@@ -1213,9 +1249,18 @@ public class TestIndexWriter extends Luc
       b[i] = (byte) (i+77);
 
     Document doc = new Document();
-    Field f = new Field("binary", b, 10, 17);
+
+    FieldType customType = new FieldType(BinaryField.TYPE_STORED);
+    customType.setTokenized(true);
+    customType.setIndexed(true);
+    
+    Field f = new Field("binary", customType, b, 10, 17);
     f.setTokenStream(new MockTokenizer(new StringReader("doc1field1"), MockTokenizer.WHITESPACE, false));
-    Field f2 = newField("string", "value", Field.Store.YES,Field.Index.ANALYZED);
+
+    FieldType customType2 = new FieldType(TextField.TYPE_UNSTORED);
+    customType2.setStored(true);
+    
+    Field f2 = newField("string", "value", customType2);
     f2.setTokenStream(new MockTokenizer(new StringReader("doc1field2"), MockTokenizer.WHITESPACE, false));
     doc.add(f);
     doc.add(f2);
@@ -1238,16 +1283,16 @@ public class TestIndexWriter extends Luc
     w.close();
 
     IndexReader ir = IndexReader.open(dir, true);
-    doc = ir.document(0);
-    f = doc.getField("binary");
-    b = f.getBinaryValue();
+    Document doc2 = ir.document(0);
+    IndexableField f3 = doc2.getField("binary");
+    b = f3.binaryValue(null).bytes;
     assertTrue(b != null);
     assertEquals(17, b.length, 17);
     assertEquals(87, b[0]);
 
-    assertTrue(ir.document(0).getFieldable("binary").isBinary());
-    assertTrue(ir.document(1).getFieldable("binary").isBinary());
-    assertTrue(ir.document(2).getFieldable("binary").isBinary());
+    assertTrue(ir.document(0).getField("binary").binaryValue(null)!=null);
+    assertTrue(ir.document(1).getField("binary").binaryValue(null)!=null);
+    assertTrue(ir.document(2).getField("binary").binaryValue(null)!=null);
 
     assertEquals("value", ir.document(0).get("string"));
     assertEquals("value", ir.document(1).get("string"));
@@ -1272,13 +1317,16 @@ public class TestIndexWriter extends Luc
     Directory d = newDirectory();
     IndexWriter w = new IndexWriter(d, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
     Document doc = new Document();
-    doc.add(newField("zzz", "a b c", Field.Store.YES, Field.Index.NO));
-    doc.add(newField("aaa", "a b c", Field.Store.YES, Field.Index.NO));
-    doc.add(newField("zzz", "1 2 3", Field.Store.YES, Field.Index.NO));
+
+    FieldType customType = new FieldType();
+    customType.setStored(true);
+    doc.add(newField("zzz", "a b c", customType));
+    doc.add(newField("aaa", "a b c", customType));
+    doc.add(newField("zzz", "1 2 3", customType));
     w.addDocument(doc);
     IndexReader r = w.getReader();
-    doc = r.document(0);
-    Iterator<Fieldable> it = doc.getFields().iterator();
+    Document doc2 = r.document(0);
+    Iterator<IndexableField> it = doc2.getFields().iterator();
     assertTrue(it.hasNext());
     Field f = (Field) it.next();
     assertEquals(f.name(), "zzz");
@@ -1322,7 +1370,7 @@ public class TestIndexWriter extends Luc
       s.append(' ').append(i);
     }
     Document d = new Document();
-    Field f = newField("field", s.toString(), Field.Store.NO, Field.Index.ANALYZED);
+    Field f = newField("field", s.toString(), TextField.TYPE_UNSTORED);
     d.add(f);
     w.addDocument(d);
 
@@ -1354,7 +1402,7 @@ public class TestIndexWriter extends Luc
               setMergePolicy(mergePolicy)
       );
       Document doc = new Document();
-      doc.add(newField("field", "go", Field.Store.NO, Field.Index.ANALYZED));
+      doc.add(newField("field", "go", TextField.TYPE_UNSTORED));
       w.addDocument(doc);
       IndexReader r;
       if (iter == 0) {
@@ -1422,7 +1470,14 @@ public class TestIndexWriter extends Luc
 
     // First commit
     Document doc = new Document();
-    doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStored(true);
+    customType.setStoreTermVectors(true);
+    customType.setStoreTermVectorPositions(true);
+    customType.setStoreTermVectorOffsets(true);
+    
+    doc.add(newField("c", "val", customType));
     writer.addDocument(doc);
     writer.commit();
     assertEquals(1, IndexReader.listCommits(dir).size());
@@ -1432,7 +1487,7 @@ public class TestIndexWriter extends Luc
 
     // Second commit - now KeepOnlyLastCommit cannot delete the prev commit.
     doc = new Document();
-    doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(newField("c", "val", customType));
     writer.addDocument(doc);
     writer.commit();
     assertEquals(2, IndexReader.listCommits(dir).size());
@@ -1479,14 +1534,19 @@ public class TestIndexWriter extends Luc
     }
 
     Document doc = new Document();
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStored(true);
+    customType.setStoreTermVectors(true);
+    customType.setStoreTermVectorPositions(true);
+    customType.setStoreTermVectorOffsets(true);
     // create as many files as possible
-    doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(newField("c", "val", customType));
     writer.addDocument(doc);
     // Adding just one document does not call flush yet.
     assertEquals("only the stored and term vector files should exist in the directory", 5 + extraFileCount, dir.listAll().length);
 
     doc = new Document();
-    doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(newField("c", "val", customType));
     writer.addDocument(doc);
 
     // The second document should cause a flush.
@@ -1509,7 +1569,12 @@ public class TestIndexWriter extends Luc
         TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
 
     Document doc = new Document();
-    doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStored(true);
+    customType.setStoreTermVectors(true);
+    customType.setStoreTermVectorPositions(true);
+    customType.setStoreTermVectorOffsets(true);
+    doc.add(newField("c", "val", customType));
     w.addDocument(doc);
     w.addDocument(doc);
     IndexWriter w2 = new IndexWriter(dir, newIndexWriterConfig(
@@ -1536,7 +1601,10 @@ public class TestIndexWriter extends Luc
 
     final List<Integer> fieldIDs = new ArrayList<Integer>();
 
-    Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStored(true);
+    customType.setTokenized(false);
+    Field idField = newField("id", "", customType);
 
     for(int i=0;i<fieldCount;i++) {
       fieldIDs.add(i);
@@ -1548,6 +1616,8 @@ public class TestIndexWriter extends Luc
       System.out.println("TEST: build index docCount=" + docCount);
     }
 
+    FieldType customType2 = new FieldType();
+    customType2.setStored(true);
     for(int i=0;i<docCount;i++) {
       Document doc = new Document();
       doc.add(idField);
@@ -1562,7 +1632,7 @@ public class TestIndexWriter extends Luc
         final String s;
         if (rand.nextInt(4) != 3) {
           s = _TestUtil.randomUnicodeString(rand, 1000);
-          doc.add(newField("f"+field, s, Field.Store.YES, Field.Index.NO));
+          doc.add(newField("f"+field, s, customType2));
         } else {
           s = null;
         }
@@ -1628,12 +1698,23 @@ public class TestIndexWriter extends Luc
     String BIG="alskjhlaksjghlaksjfhalksvjepgjioefgjnsdfjgefgjhelkgjhqewlrkhgwlekgrhwelkgjhwelkgrhwlkejg";
     BIG=BIG+BIG+BIG+BIG;
 
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStored(true);
+    customType.setOmitNorms(true);
+    FieldType customType2 = new FieldType(TextField.TYPE_UNSTORED);
+    customType2.setStored(true);
+    customType2.setTokenized(false);
+    FieldType customType3 = new FieldType(TextField.TYPE_UNSTORED);
+    customType3.setStored(true);
+    customType3.setTokenized(false);
+    customType3.setOmitNorms(true);
+    
     for (int i=0; i<2; i++) {
       Document doc = new Document();
-      doc.add(new Field("id", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
-      doc.add(new Field("str", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.NOT_ANALYZED));
-      doc.add(new Field("str2", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.ANALYZED));
-      doc.add(new Field("str3", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.ANALYZED_NO_NORMS));
+      doc.add(new Field("id", customType3, Integer.toString(i)+BIG));
+      doc.add(new Field("str", customType2, Integer.toString(i)+BIG));
+      doc.add(new Field("str2", storedTextType, Integer.toString(i)+BIG));
+      doc.add(new Field("str3", customType, Integer.toString(i)+BIG));
       indexWriter.addDocument(doc);
     }
 
@@ -1707,12 +1788,12 @@ public class TestIndexWriter extends Luc
 
     // This contents produces a too-long term:
     String contents = "abc xyz x" + bigTerm + " another term";
-    doc.add(new Field("content", contents, Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(new TextField("content", contents));
     w.addDocument(doc);
 
     // Make sure we can add another normal document
     doc = new Document();
-    doc.add(new Field("content", "abc bbb ccc", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(new TextField("content", "abc bbb ccc"));
     w.addDocument(doc);
 
     IndexReader reader = w.getReader();
@@ -1742,7 +1823,9 @@ public class TestIndexWriter extends Luc
     // Make sure we can add a document with exactly the
     // maximum length term, and search on that term:
     doc = new Document();
-    Field contentField = new Field("content", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setTokenized(false);
+    Field contentField = new Field("content", customType, "");
     doc.add(contentField);
 
     w = new RandomIndexWriter(random, dir);
@@ -1779,7 +1862,7 @@ public class TestIndexWriter extends Luc
     iwc.setReaderTermsIndexDivisor(1);
     IndexWriter writer = new IndexWriter(dir, iwc);
     Document doc = new Document();
-    doc.add(newField("", "a b c", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("", "a b c", TextField.TYPE_UNSTORED));
     writer.addDocument(doc);
     writer.close();
     dir.close();

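Term vectors follow the same scheme: where a field used to pass
Field.TermVector.WITH_POSITIONS_OFFSETS, the hunks above build the equivalent
FieldType by hand. A compact sketch of that recurring block (the customType
variable name mirrors the patch; "c"/"val" are illustrative; newField is the
LuceneTestCase helper):

    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
    customType.setStored(true);                    // was Field.Store.YES
    customType.setStoreTermVectors(true);          // was Field.TermVector...
    customType.setStoreTermVectorPositions(true);  // ...WITH_POSITIONS...
    customType.setStoreTermVectorOffsets(true);    // ...OFFSETS
    doc.add(newField("c", "val", customType));
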
Modified: lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterCommit.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterCommit.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterCommit.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterCommit.java Mon Aug 15 22:03:41 2011
@@ -30,6 +30,7 @@ import org.apache.lucene.analysis.MockTo
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.index.codecs.CodecProvider;
 import org.apache.lucene.search.IndexSearcher;
@@ -342,7 +343,7 @@ public class TestIndexWriterCommit exten
             try {
               final Document doc = new Document();
               IndexReader r = IndexReader.open(dir);
-              Field f = newField("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
+              Field f = newField("f", "", StringField.TYPE_UNSTORED);
               doc.add(f);
               int count = 0;
               do {

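The mapping applied for analyzed vs. not-analyzed fields across this commit:
Field.Index.ANALYZED becomes a TextField type and Field.Index.NOT_ANALYZED
becomes a StringField type, with setStored(true) layered on where
Field.Store.YES was used. A small illustrative sketch (field names and values
are not from the patch):

    // Analyzed free text, not stored (was Field.Index.ANALYZED, Store.NO):
    doc.add(newField("contents", "aa bb cc", TextField.TYPE_UNSTORED));
    // Single-token keyword, not stored (was Field.Index.NOT_ANALYZED, Store.NO):
    doc.add(newField("id", "42", StringField.TYPE_UNSTORED));
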
Modified: lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java Mon Aug 15 22:03:41 2011
@@ -31,7 +31,10 @@ import org.apache.lucene.analysis.MockAn
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TermQuery;
@@ -55,17 +61,18 @@ public class TestIndexWriterDelete exten
     IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(
         TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDeleteTerms(1));
 
+    FieldType custom = new FieldType(StringField.TYPE_UNSTORED);
+    custom.setStored(true);
+    FieldType custom1 = new FieldType();
+    custom1.setStored(true);
+    FieldType custom2 = new FieldType(TextField.TYPE_UNSTORED);
+    custom2.setStored(true);
     for (int i = 0; i < keywords.length; i++) {
       Document doc = new Document();
-      doc.add(newField("id", keywords[i], Field.Store.YES,
-                        Field.Index.NOT_ANALYZED));
-      doc.add(newField("country", unindexed[i], Field.Store.YES,
-                        Field.Index.NO));
-      doc.add(newField("contents", unstored[i], Field.Store.NO,
-                        Field.Index.ANALYZED));
-      doc
-        .add(newField("city", text[i], Field.Store.YES,
-                       Field.Index.ANALYZED));
+      doc.add(newField("id", keywords[i], custom));
+      doc.add(newField("country", unindexed[i], custom1));
+      doc.add(newField("contents", unstored[i], TextField.TYPE_UNSTORED));
+      doc.add(newField("city", text[i], custom2));
       modifier.addDocument(doc);
     }
     modifier.optimize();
@@ -383,11 +390,11 @@ public class TestIndexWriterDelete exten
   private void updateDoc(IndexWriter modifier, int id, int value)
       throws IOException {
     Document doc = new Document();
-    doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
-    doc.add(newField("id", String.valueOf(id), Field.Store.YES,
-        Field.Index.NOT_ANALYZED));
-    doc.add(newField("value", String.valueOf(value), Field.Store.NO,
-        Field.Index.NOT_ANALYZED));
+    FieldType custom = new FieldType(StringField.TYPE_UNSTORED);
+    custom.setStored(true);
+    doc.add(newField("content", "aaa", TextField.TYPE_UNSTORED));
+    doc.add(newField("id", String.valueOf(id), custom));
+    doc.add(newField("value", String.valueOf(value), StringField.TYPE_UNSTORED));
     modifier.updateDocument(new Term("id", String.valueOf(id)), doc);
   }
 
@@ -395,11 +402,11 @@ public class TestIndexWriterDelete exten
   private void addDoc(IndexWriter modifier, int id, int value)
       throws IOException {
     Document doc = new Document();
-    doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
-    doc.add(newField("id", String.valueOf(id), Field.Store.YES,
-        Field.Index.NOT_ANALYZED));
-    doc.add(newField("value", String.valueOf(value), Field.Store.NO,
-        Field.Index.NOT_ANALYZED));
+    FieldType custom = new FieldType(StringField.TYPE_UNSTORED);
+    custom.setStored(true);
+    doc.add(newField("content", "aaa", TextField.TYPE_UNSTORED));
+    doc.add(newField("id", String.valueOf(id), custom));
+    doc.add(newField("value", String.valueOf(value), StringField.TYPE_UNSTORED));
     modifier.addDocument(doc);
   }
 
@@ -433,12 +440,12 @@ public class TestIndexWriterDelete exten
     // TODO: find the resource leak that only occurs sometimes here.
     startDir.setNoDeleteOpenFile(false);
     IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
+    FieldType custom = new FieldType(StringField.TYPE_UNSTORED);
+    custom.setStored(true);
     for (int i = 0; i < 157; i++) {
       Document d = new Document();
-      d.add(newField("id", Integer.toString(i), Field.Store.YES,
-                      Field.Index.NOT_ANALYZED));
-      d.add(newField("content", "aaa " + i, Field.Store.NO,
-                      Field.Index.ANALYZED));
+      d.add(newField("id", Integer.toString(i), custom));
+      d.add(newField("content", "aaa " + i, TextField.TYPE_UNSTORED));
       writer.addDocument(d);
     }
     writer.close();
@@ -516,10 +523,8 @@ public class TestIndexWriterDelete exten
             for (int i = 0; i < 13; i++) {
               if (updates) {
                 Document d = new Document();
-                d.add(newField("id", Integer.toString(i), Field.Store.YES,
-                                Field.Index.NOT_ANALYZED));
-                d.add(newField("content", "bbb " + i, Field.Store.NO,
-                                Field.Index.ANALYZED));
+                d.add(newField("id", Integer.toString(i), custom));
+                d.add(newField("content", "bbb " + i, TextField.TYPE_UNSTORED));
                 modifier.updateDocument(new Term("id", Integer.toString(docId)), d);
               } else { // deletes
                 modifier.deleteDocuments(new Term("id", Integer.toString(docId)));
@@ -708,16 +713,18 @@ public class TestIndexWriterDelete exten
 
     dir.failOn(failure.reset());
 
+    FieldType custom = new FieldType(StringField.TYPE_UNSTORED);
+    custom.setStored(true);
+    FieldType custom1 = new FieldType();
+    custom1.setStored(true);
+    FieldType custom2 = new FieldType(TextField.TYPE_UNSTORED);
+    custom2.setStored(true);
     for (int i = 0; i < keywords.length; i++) {
       Document doc = new Document();
-      doc.add(newField("id", keywords[i], Field.Store.YES,
-                        Field.Index.NOT_ANALYZED));
-      doc.add(newField("country", unindexed[i], Field.Store.YES,
-                        Field.Index.NO));
-      doc.add(newField("contents", unstored[i], Field.Store.NO,
-                        Field.Index.ANALYZED));
-      doc.add(newField("city", text[i], Field.Store.YES,
-                        Field.Index.ANALYZED));
+      doc.add(newField("id", keywords[i], StringField.TYPE_UNSTORED));
+      doc.add(newField("country", unindexed[i], custom1));
+      doc.add(newField("contents", unstored[i], TextField.TYPE_UNSTORED));
+      doc.add(newField("city", text[i], custom2));
       modifier.addDocument(doc);
     }
     // flush (and commit if ac)
@@ -831,16 +838,18 @@ public class TestIndexWriterDelete exten
     modifier.commit();
     dir.failOn(failure.reset());
 
+    FieldType custom = new FieldType(StringField.TYPE_UNSTORED);
+    custom.setStored(true);
+    FieldType custom1 = new FieldType();
+    custom1.setStored(true);
+    FieldType custom2 = new FieldType(TextField.TYPE_UNSTORED);
+    custom2.setStored(true);
     for (int i = 0; i < keywords.length; i++) {
       Document doc = new Document();
-      doc.add(newField("id", keywords[i], Field.Store.YES,
-                        Field.Index.NOT_ANALYZED));
-      doc.add(newField("country", unindexed[i], Field.Store.YES,
-                        Field.Index.NO));
-      doc.add(newField("contents", unstored[i], Field.Store.NO,
-                        Field.Index.ANALYZED));
-      doc.add(newField("city", text[i], Field.Store.YES,
-                        Field.Index.ANALYZED));
+      doc.add(newField("id", keywords[i], custom));
+      doc.add(newField("country", unindexed[i], custom1));
+      doc.add(newField("contents", unstored[i], TextField.TYPE_UNSTORED));
+      doc.add(newField("city", text[i], custom2));
       try {
         modifier.addDocument(doc);
       } catch (IOException io) {
@@ -883,7 +892,7 @@ public class TestIndexWriterDelete exten
     Collections.shuffle(ids, random);
     for(int id : ids) {
       Document doc = new Document();
-      doc.add(newField("id", ""+id, Field.Index.NOT_ANALYZED));
+      doc.add(newField("id", ""+id, StringField.TYPE_UNSTORED));
       w.addDocument(doc);
     }
     Collections.shuffle(ids, random);
@@ -917,7 +926,7 @@ public class TestIndexWriterDelete exten
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer).setRAMBufferSizeMB(1.0).setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH).setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH));
     w.setInfoStream(VERBOSE ? System.out : null);
     Document doc = new Document();
-    doc.add(newField("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", TextField.TYPE_UNSTORED));
     int num = atLeast(3);
     for (int iter = 0; iter < num; iter++) {
       int count = 0;

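One less obvious case in the hunks above: a field that was stored but not
indexed (Field.Store.YES, Field.Index.NO), such as "country", now uses a bare
FieldType with only setStored(true) enabled. Sketch of the pattern as it
appears in the patch (custom1 and unindexed[i] are taken from the hunks):

    FieldType custom1 = new FieldType(); // nothing indexed or tokenized
    custom1.setStored(true);             // was Field.Store.YES, Field.Index.NO
    doc.add(newField("country", unindexed[i], custom1));
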
Modified: lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java Mon Aug 15 22:03:41 2011
@@ -34,6 +34,9 @@ import org.apache.lucene.analysis.TokenF
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.IndexSearcher;
@@ -54,6 +57,36 @@ public class TestIndexWriterExceptions e
   private static class DocCopyIterator implements Iterable<Document> {
     private final Document doc;
     private final int count;
+    
+    /* private field types */
+    /* private field types */
+
+    private static final FieldType custom1 = new FieldType(TextField.TYPE_UNSTORED);
+    private static final FieldType custom2 = new FieldType();
+    private static final FieldType custom3 = new FieldType();
+    private static final FieldType custom4 = new FieldType(StringField.TYPE_UNSTORED);
+    private static final FieldType custom5 = new FieldType(TextField.TYPE_UNSTORED);
+    
+    static {
+
+      custom1.setStoreTermVectors(true);
+      custom1.setStoreTermVectorPositions(true);
+      custom1.setStoreTermVectorOffsets(true);
+      
+      custom2.setStored(true);
+      custom2.setIndexed(true);
+      
+      custom3.setStored(true);
+
+      custom4.setStoreTermVectors(true);
+      custom4.setStoreTermVectorPositions(true);
+      custom4.setStoreTermVectorOffsets(true);
+      
+      custom5.setStored(true);
+      custom5.setStoreTermVectors(true);
+      custom5.setStoreTermVectorPositions(true);
+      custom5.setStoreTermVectorOffsets(true);
+    }
 
     public DocCopyIterator(Document doc, int count) {
       this.count = count;
@@ -101,17 +134,17 @@ public class TestIndexWriterExceptions e
 
       final Document doc = new Document();
 
-      doc.add(newField("content1", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.ANALYZED));
-      doc.add(newField("content6", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-      doc.add(newField("content2", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.NOT_ANALYZED));
-      doc.add(newField("content3", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.NO));
+      doc.add(newField("content1", "aaa bbb ccc ddd", TextField.TYPE_STORED));
+      doc.add(newField("content6", "aaa bbb ccc ddd", DocCopyIterator.custom1));
+      doc.add(newField("content2", "aaa bbb ccc ddd", DocCopyIterator.custom2));
+      doc.add(newField("content3", "aaa bbb ccc ddd", DocCopyIterator.custom3));
 
-      doc.add(newField("content4", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.ANALYZED));
-      doc.add(newField("content5", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.NOT_ANALYZED));
+      doc.add(newField("content4", "aaa bbb ccc ddd", TextField.TYPE_UNSTORED));
+      doc.add(newField("content5", "aaa bbb ccc ddd", StringField.TYPE_UNSTORED));
 
-      doc.add(newField("content7", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      doc.add(newField("content7", "aaa bbb ccc ddd", DocCopyIterator.custom4));
 
-      final Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
+      final Field idField = newField("id", "", DocCopyIterator.custom2);
       doc.add(idField);
 
       final long stopTime = System.currentTimeMillis() + 500;
@@ -337,8 +370,7 @@ public class TestIndexWriterExceptions e
     MockIndexWriter2 w = new MockIndexWriter2(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
     w.setInfoStream(VERBOSE ? System.out : null);
     Document doc = new Document();
-    doc.add(newField("field", "a field", Field.Store.YES,
-                      Field.Index.ANALYZED));
+    doc.add(newField("field", "a field", TextField.TYPE_STORED));
     w.addDocument(doc);
     w.doFail = true;
     try {
@@ -357,8 +389,7 @@ public class TestIndexWriterExceptions e
     MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
     w.setInfoStream(VERBOSE ? System.out : null);
     Document doc = new Document();
-    doc.add(newField("field", "a field", Field.Store.YES,
-                      Field.Index.ANALYZED));
+    doc.add(newField("field", "a field", TextField.TYPE_STORED));
     w.addDocument(doc);
 
     Analyzer analyzer = new Analyzer() {
@@ -371,8 +402,7 @@ public class TestIndexWriterExceptions e
     };
 
     Document crashDoc = new Document();
-    crashDoc.add(newField("crash", "do it on token 4", Field.Store.YES,
-                           Field.Index.ANALYZED));
+    crashDoc.add(newField("crash", "do it on token 4", TextField.TYPE_STORED));
     try {
       w.addDocument(crashDoc, analyzer);
       fail("did not hit expected exception");
@@ -413,8 +443,7 @@ public class TestIndexWriterExceptions e
     MockIndexWriter3 w = new MockIndexWriter3(dir, conf);
     w.doFail = true;
     Document doc = new Document();
-    doc.add(newField("field", "a field", Field.Store.YES,
-                      Field.Index.ANALYZED));
+    doc.add(newField("field", "a field", TextField.TYPE_STORED));
     for(int i=0;i<10;i++)
       try {
         w.addDocument(doc);
@@ -457,8 +486,7 @@ public class TestIndexWriterExceptions e
 
     Document doc = new Document();
     String contents = "aa bb cc dd ee ff gg hh ii jj kk";
-    doc.add(newField("content", contents, Field.Store.NO,
-        Field.Index.ANALYZED));
+    doc.add(newField("content", contents, TextField.TYPE_UNSTORED));
     try {
       writer.addDocument(doc);
       fail("did not hit expected exception");
@@ -467,14 +495,12 @@ public class TestIndexWriterExceptions e
 
     // Make sure we can add another normal document
     doc = new Document();
-    doc.add(newField("content", "aa bb cc dd", Field.Store.NO,
-        Field.Index.ANALYZED));
+    doc.add(newField("content", "aa bb cc dd", TextField.TYPE_UNSTORED));
     writer.addDocument(doc);
 
     // Make sure we can add another normal document
     doc = new Document();
-    doc.add(newField("content", "aa bb cc dd", Field.Store.NO,
-        Field.Index.ANALYZED));
+    doc.add(newField("content", "aa bb cc dd", TextField.TYPE_UNSTORED));
     writer.addDocument(doc);
 
     writer.close();
@@ -545,8 +571,7 @@ public class TestIndexWriterExceptions e
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
     Document doc = new Document();
     String contents = "aa bb cc dd ee ff gg hh ii jj kk";
-    doc.add(newField("content", contents, Field.Store.NO,
-        Field.Index.ANALYZED));
+    doc.add(newField("content", contents, TextField.TYPE_UNSTORED));
     boolean hitError = false;
     for(int i=0;i<200;i++) {
       try {
@@ -589,14 +614,11 @@ public class TestIndexWriterExceptions e
       lmp.setMergeFactor(Math.max(lmp.getMergeFactor(), 5));
 
       Document doc = new Document();
-      doc.add(newField("contents", "here are some contents", Field.Store.YES,
-                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      doc.add(newField("contents", "here are some contents", DocCopyIterator.custom5));
       writer.addDocument(doc);
       writer.addDocument(doc);
-      doc.add(newField("crash", "this should crash after 4 terms", Field.Store.YES,
-                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-      doc.add(newField("other", "this will not get indexed", Field.Store.YES,
-                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      doc.add(newField("crash", "this should crash after 4 terms", DocCopyIterator.custom5));
+      doc.add(newField("other", "this will not get indexed", DocCopyIterator.custom5));
       try {
         writer.addDocument(doc);
         fail("did not hit expected exception");
@@ -609,8 +631,7 @@ public class TestIndexWriterExceptions e
 
       if (0 == i) {
         doc = new Document();
-        doc.add(newField("contents", "here are some contents", Field.Store.YES,
-                          Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+        doc.add(newField("contents", "here are some contents", DocCopyIterator.custom5));
         writer.addDocument(doc);
         writer.addDocument(doc);
       }
@@ -642,8 +663,7 @@ public class TestIndexWriterExceptions e
       writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT,
           analyzer).setMaxBufferedDocs(10));
       doc = new Document();
-      doc.add(newField("contents", "here are some contents", Field.Store.YES,
-                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      doc.add(newField("contents", "here are some contents", DocCopyIterator.custom5));
       for(int j=0;j<17;j++)
         writer.addDocument(doc);
       writer.optimize();
@@ -699,14 +719,11 @@ public class TestIndexWriterExceptions e
                 try {
                   for(int iter=0;iter<NUM_ITER;iter++) {
                     Document doc = new Document();
-                    doc.add(newField("contents", "here are some contents", Field.Store.YES,
-                                      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+                    doc.add(newField("contents", "here are some contents", DocCopyIterator.custom5));
                     writer.addDocument(doc);
                     writer.addDocument(doc);
-                    doc.add(newField("crash", "this should crash after 4 terms", Field.Store.YES,
-                                      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-                    doc.add(newField("other", "this will not get indexed", Field.Store.YES,
-                                      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+                    doc.add(newField("crash", "this should crash after 4 terms", DocCopyIterator.custom5));
+                    doc.add(newField("other", "this will not get indexed", DocCopyIterator.custom5));
                     try {
                       writer.addDocument(doc);
                       fail("did not hit expected exception");
@@ -715,8 +732,7 @@ public class TestIndexWriterExceptions e
 
                     if (0 == finalI) {
                       doc = new Document();
-                      doc.add(newField("contents", "here are some contents", Field.Store.YES,
-                                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+                      doc.add(newField("contents", "here are some contents", DocCopyIterator.custom5));
                       writer.addDocument(doc);
                       writer.addDocument(doc);
                     }
@@ -761,8 +777,7 @@ public class TestIndexWriterExceptions e
       IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
           TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(10));
       Document doc = new Document();
-      doc.add(newField("contents", "here are some contents", Field.Store.YES,
-                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      doc.add(newField("contents", "here are some contents", DocCopyIterator.custom5));
       for(int j=0;j<17;j++)
         writer.addDocument(doc);
       writer.optimize();
@@ -805,7 +820,7 @@ public class TestIndexWriterExceptions e
   private void addDoc(IndexWriter writer) throws IOException
   {
       Document doc = new Document();
-      doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
+      doc.add(newField("content", "aaa", TextField.TYPE_UNSTORED));
       writer.addDocument(doc);
   }
 
@@ -901,8 +916,7 @@ public class TestIndexWriterExceptions e
       IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
           TEST_VERSION_CURRENT, new MockAnalyzer(random)));
       Document doc = new Document();
-      doc.add(newField("field", "a field", Field.Store.YES,
-          Field.Index.ANALYZED));
+      doc.add(newField("field", "a field", TextField.TYPE_STORED));
       w.addDocument(doc);
       dir.failOn(failure);
       try {
@@ -1234,13 +1248,12 @@ public class TestIndexWriterExceptions e
         int numDocs = 10 + random.nextInt(30);
         for (int i = 0; i < numDocs; i++) {
           Document doc = new Document();
-          Field field = newField(random, "field", "a field", Field.Store.YES,
-              Field.Index.ANALYZED);
+          Field field = newField(random, "field", "a field", TextField.TYPE_STORED);
           doc.add(field);
           // random TV
           try {
             w.addDocument(doc);
-            assertFalse(field.isTermVectorStored());
+            assertFalse(field.storeTermVectors());
           } catch (RuntimeException e) {
             assertTrue(e.getMessage().startsWith(FailOnTermVectors.EXC_MSG));
           }
@@ -1251,19 +1264,17 @@ public class TestIndexWriterExceptions e
             
         }
         Document document = new Document();
-        document.add(new Field("field", "a field", Field.Store.YES,
-            Field.Index.ANALYZED));
+        document.add(new Field("field", TextField.TYPE_STORED, "a field"));
         w.addDocument(document);
 
         for (int i = 0; i < numDocs; i++) {
           Document doc = new Document();
-          Field field = newField(random, "field", "a field", Field.Store.YES,
-              Field.Index.ANALYZED);
+          Field field = newField(random, "field", "a field", TextField.TYPE_STORED);
           doc.add(field);
           // random TV
           try {
             w.addDocument(doc);
-            assertFalse(field.isTermVectorStored());
+            assertFalse(field.storeTermVectors());
           } catch (RuntimeException e) {
             assertTrue(e.getMessage().startsWith(FailOnTermVectors.EXC_MSG));
           }
@@ -1273,8 +1284,7 @@ public class TestIndexWriterExceptions e
           }
         }
         document = new Document();
-        document.add(new Field("field", "a field", Field.Store.YES,
-            Field.Index.ANALYZED));
+        document.add(new Field("field", TextField.TYPE_STORED, "a field"));
         w.addDocument(document);
         w.close();
         IndexReader reader = IndexReader.open(dir);
@@ -1328,7 +1338,7 @@ public class TestIndexWriterExceptions e
     final int numDocs1 = random.nextInt(25);
     for(int docCount=0;docCount<numDocs1;docCount++) {
       Document doc = new Document();
-      doc.add(newField("content", "good content", Field.Index.ANALYZED));
+      doc.add(newField("content", "good content", TextField.TYPE_UNSTORED));
       w.addDocument(doc);
     }
     
@@ -1336,10 +1346,10 @@ public class TestIndexWriterExceptions e
     for(int docCount=0;docCount<7;docCount++) {
       Document doc = new Document();
       docs.add(doc);
-      doc.add(newField("id", docCount+"", Field.Index.NOT_ANALYZED));
-      doc.add(newField("content", "silly content " + docCount, Field.Index.ANALYZED));
+      doc.add(newField("id", docCount+"", StringField.TYPE_UNSTORED));
+      doc.add(newField("content", "silly content " + docCount, TextField.TYPE_UNSTORED));
       if (docCount == 4) {
-        Field f = newField("crash", "", Field.Index.ANALYZED);
+        Field f = newField("crash", "", TextField.TYPE_UNSTORED);
         doc.add(f);
         MockTokenizer tokenizer = new MockTokenizer(new StringReader("crash me on the 4th token"), MockTokenizer.WHITESPACE, false);
         tokenizer.setEnableChecks(false); // disable workflow checking as we forcefully close() in exceptional cases.
@@ -1358,7 +1368,7 @@ public class TestIndexWriterExceptions e
     final int numDocs2 = random.nextInt(25);
     for(int docCount=0;docCount<numDocs2;docCount++) {
       Document doc = new Document();
-      doc.add(newField("content", "good content", Field.Index.ANALYZED));
+      doc.add(newField("content", "good content", TextField.TYPE_UNSTORED));
       w.addDocument(doc);
     }
 
@@ -1386,7 +1396,7 @@ public class TestIndexWriterExceptions e
     final int numDocs1 = random.nextInt(25);
     for(int docCount=0;docCount<numDocs1;docCount++) {
       Document doc = new Document();
-      doc.add(newField("content", "good content", Field.Index.ANALYZED));
+      doc.add(newField("content", "good content", TextField.TYPE_UNSTORED));
       w.addDocument(doc);
     }
 
@@ -1396,16 +1406,16 @@ public class TestIndexWriterExceptions e
     for(int docCount=0;docCount<numDocs2;docCount++) {
       Document doc = new Document();
       docs.add(doc);
-      doc.add(newField("subid", "subs", Field.Index.NOT_ANALYZED));
-      doc.add(newField("id", docCount+"", Field.Index.NOT_ANALYZED));
-      doc.add(newField("content", "silly content " + docCount, Field.Index.ANALYZED));
+      doc.add(newField("subid", "subs", StringField.TYPE_UNSTORED));
+      doc.add(newField("id", docCount+"", StringField.TYPE_UNSTORED));
+      doc.add(newField("content", "silly content " + docCount, TextField.TYPE_UNSTORED));
     }
     w.addDocuments(docs);
 
     final int numDocs3 = random.nextInt(25);
     for(int docCount=0;docCount<numDocs3;docCount++) {
       Document doc = new Document();
-      doc.add(newField("content", "good content", Field.Index.ANALYZED));
+      doc.add(newField("content", "good content", TextField.TYPE_UNSTORED));
       w.addDocument(doc);
     }
 
@@ -1415,10 +1425,10 @@ public class TestIndexWriterExceptions e
     for(int docCount=0;docCount<limit;docCount++) {
       Document doc = new Document();
       docs.add(doc);
-      doc.add(newField("id", docCount+"", Field.Index.NOT_ANALYZED));
-      doc.add(newField("content", "silly content " + docCount, Field.Index.ANALYZED));
+      doc.add(newField("id", docCount+"", StringField.TYPE_UNSTORED));
+      doc.add(newField("content", "silly content " + docCount, TextField.TYPE_UNSTORED));
       if (docCount == crashAt) {
-        Field f = newField("crash", "", Field.Index.ANALYZED);
+        Field f = newField("crash", "", TextField.TYPE_UNSTORED);
         doc.add(f);
         MockTokenizer tokenizer = new MockTokenizer(new StringReader("crash me on the 4th token"), MockTokenizer.WHITESPACE, false);
         tokenizer.setEnableChecks(false); // disable workflow checking as we forcefully close() in exceptional cases.
@@ -1438,7 +1448,7 @@ public class TestIndexWriterExceptions e
     final int numDocs4 = random.nextInt(25);
     for(int docCount=0;docCount<numDocs4;docCount++) {
       Document doc = new Document();
-      doc.add(newField("content", "good content", Field.Index.ANALYZED));
+      doc.add(newField("content", "good content", TextField.TYPE_UNSTORED));
       w.addDocument(doc);
     }
 

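A note on the pattern the TestIndexWriterExceptions hunks above migrate to: the old Field(name, value, Store, Index) constructors become Field(name, FieldType, value), with ready-made types such as TextField.TYPE_STORED, and the old isTermVectorStored() accessor becomes storeTermVectors(). The following is a minimal sketch only, using the plain Field constructor instead of the tests' newField(...) helper from LuceneTestCase; the class name is illustrative, not part of the commit.

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.TextField;

    public class NewFieldApiSketch {
      public static void main(String[] args) {
        Document doc = new Document();
        // Old style: new Field("field", "a field", Field.Store.YES, Field.Index.ANALYZED)
        // New style: the FieldType argument now precedes the value.
        Field field = new Field("field", TextField.TYPE_STORED, "a field");
        doc.add(field);
        // Old: field.isTermVectorStored()  ->  new: field.storeTermVectors()
        System.out.println(field.storeTermVectors()); // expected false: TYPE_STORED does not turn on term vectors
      }
    }
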
Modified: lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java Mon Aug 15 22:03:41 2011
@@ -22,6 +22,7 @@ import java.io.IOException;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.store.Directory;
 
@@ -220,7 +221,7 @@ public class TestIndexWriterMergePolicy 
 
   private void addDoc(IndexWriter writer) throws IOException {
     Document doc = new Document();
-    doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("content", "aaa", TextField.TYPE_UNSTORED));
     writer.addDocument(doc);
   }
 

Modified: lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterMerging.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterMerging.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterMerging.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterMerging.java Mon Aug 15 22:03:41 2011
@@ -19,9 +19,9 @@ import org.apache.lucene.store.Directory
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.util.LuceneTestCase;
 
@@ -107,10 +107,12 @@ public class TestIndexWriterMerging exte
             setMergePolicy(newLogMergePolicy(2))
     );
 
+    FieldType custom = new FieldType(StringField.TYPE_UNSTORED);
+    custom.setStored(true);
     for (int i = start; i < (start + numDocs); i++)
     {
       Document temp = new Document();
-      temp.add(newField("count", (""+i), Field.Store.YES, Field.Index.NOT_ANALYZED));
+      temp.add(newField("count", (""+i), custom));
 
       writer.addDocument(temp);
     }
@@ -129,12 +131,19 @@ public class TestIndexWriterMerging exte
     Document document = new Document();
 
     document = new Document();
-    Field storedField = newField("stored", "stored", Field.Store.YES,
-                                  Field.Index.NO);
+
+    FieldType customType = new FieldType();
+    customType.setStored(true);
+
+    FieldType customType1 = new FieldType(TextField.TYPE_UNSTORED);
+    customType1.setTokenized(false);
+    customType1.setStoreTermVectors(true);
+    customType1.setStoreTermVectorPositions(true);
+    customType1.setStoreTermVectorOffsets(true);
+    
+    Field storedField = newField("stored", "stored", customType);
     document.add(storedField);
-    Field termVectorField = newField("termVector", "termVector",
-                                      Field.Store.NO, Field.Index.NOT_ANALYZED,
-                                      Field.TermVector.WITH_POSITIONS_OFFSETS);
+    Field termVectorField = newField("termVector", "termVector", customType1);
     document.add(termVectorField);
     for(int i=0;i<10;i++)
       writer.addDocument(document);
@@ -175,12 +184,19 @@ public class TestIndexWriterMerging exte
     Document document = new Document();
 
     document = new Document();
-    Field storedField = newField("stored", "stored", Store.YES,
-                                  Index.NO);
+
+    FieldType customType = new FieldType();
+    customType.setStored(true);
+
+    FieldType customType1 = new FieldType(TextField.TYPE_UNSTORED);
+    customType1.setTokenized(false);
+    customType1.setStoreTermVectors(true);
+    customType1.setStoreTermVectorPositions(true);
+    customType1.setStoreTermVectorOffsets(true);
+    
+    Field storedField = newField("stored", "stored", customType);
     document.add(storedField);
-    Field termVectorField = newField("termVector", "termVector",
-                                      Store.NO, Index.NOT_ANALYZED,
-                                      TermVector.WITH_POSITIONS_OFFSETS);
+    Field termVectorField = newField("termVector", "termVector", customType1);
     document.add(termVectorField);
     for(int i=0;i<98;i++)
       writer.addDocument(document);
@@ -223,13 +239,19 @@ public class TestIndexWriterMerging exte
 
     Document document = new Document();
 
+    FieldType customType = new FieldType();
+    customType.setStored(true);
+
+    FieldType customType1 = new FieldType(TextField.TYPE_UNSTORED);
+    customType1.setTokenized(false);
+    customType1.setStoreTermVectors(true);
+    customType1.setStoreTermVectorPositions(true);
+    customType1.setStoreTermVectorOffsets(true);
+    
     document = new Document();
-    Field storedField = newField("stored", "stored", Field.Store.YES,
-                                  Field.Index.NO);
+    Field storedField = newField("stored", "stored", customType);
     document.add(storedField);
-    Field termVectorField = newField("termVector", "termVector",
-                                      Field.Store.NO, Field.Index.NOT_ANALYZED,
-                                      Field.TermVector.WITH_POSITIONS_OFFSETS);
+    Field termVectorField = newField("termVector", "termVector", customType1);
     document.add(termVectorField);
     for(int i=0;i<98;i++)
       writer.addDocument(document);
@@ -292,8 +314,11 @@ public class TestIndexWriterMerging exte
     IndexWriter iw = new IndexWriter(dir, conf);
     iw.setInfoStream(VERBOSE ? System.out : null);
     Document document = new Document();
-    document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
-                           Field.TermVector.YES));
+
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStoreTermVectors(true);
+    
+    document.add(newField("tvtest", "a b c", customType));
     for(int i=0;i<177;i++)
       iw.addDocument(document);
     iw.close();

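The customType/customType1 pairs in the TestIndexWriterMerging hunks above replace the old Store/Index/TermVector enum triples with mutable FieldType instances. Below is a minimal standalone sketch of the same two shapes, a stored-but-not-indexed field (old Store.YES + Index.NO) and an untokenized field with term vectors, positions and offsets (old Index.NOT_ANALYZED + TermVector.WITH_POSITIONS_OFFSETS); class and variable names are illustrative only.

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.FieldType;
    import org.apache.lucene.document.TextField;

    public class MergingFieldTypesSketch {
      public static void main(String[] args) {
        // Stored but not indexed: an empty FieldType with only stored=true.
        FieldType storedOnly = new FieldType();
        storedOnly.setStored(true);

        // Indexed as a single token, with term vectors, positions and offsets.
        FieldType untokenizedWithVectors = new FieldType(TextField.TYPE_UNSTORED);
        untokenizedWithVectors.setTokenized(false);
        untokenizedWithVectors.setStoreTermVectors(true);
        untokenizedWithVectors.setStoreTermVectorPositions(true);
        untokenizedWithVectors.setStoreTermVectorOffsets(true);

        Document document = new Document();
        document.add(new Field("stored", storedOnly, "stored"));
        document.add(new Field("termVector", untokenizedWithVectors, "termVector"));
      }
    }
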
Modified: lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java Mon Aug 15 22:03:41 2011
@@ -22,6 +22,8 @@ import java.io.IOException;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.index.codecs.CodecProvider;
 import org.apache.lucene.search.IndexSearcher;
@@ -475,7 +477,11 @@ public class TestIndexWriterOnDiskFull e
     _TestUtil.keepFullyDeletedSegments(w);
 
     Document doc = new Document();
-    doc.add(newField("f", "doctor who", Field.Store.YES, Field.Index.ANALYZED));
+
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStored(false);
+    
+    doc.add(newField("f", "doctor who", customType));
     w.addDocument(doc);
     w.commit();
 
@@ -511,7 +517,11 @@ public class TestIndexWriterOnDiskFull e
         .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler()));
     dir.setMaxSizeInBytes(Math.max(1, dir.getRecomputedActualSizeInBytes()));
     final Document doc = new Document();
-    doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStored(false);
+    customType.setStoreTermVectorPositions(true);
+    customType.setStoreTermVectorOffsets(true);
+    doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", customType));
     try {
       writer.addDocument(doc);
       fail("did not hit disk full");
@@ -541,15 +551,17 @@ public class TestIndexWriterOnDiskFull e
   private void addDoc(IndexWriter writer) throws IOException
   {
       Document doc = new Document();
-      doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
+      doc.add(newField("content", "aaa", TextField.TYPE_UNSTORED));
       writer.addDocument(doc);
   }
   
   private void addDocWithIndex(IndexWriter writer, int index) throws IOException
   {
       Document doc = new Document();
-      doc.add(newField("content", "aaa " + index, Field.Store.YES, Field.Index.ANALYZED));
-      doc.add(newField("id", "" + index, Field.Store.YES, Field.Index.ANALYZED));
+      FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+      customType.setStored(false);
+      doc.add(newField("content", "aaa " + index, customType));
+      doc.add(newField("id", "" + index, customType));
       writer.addDocument(doc);
   }
 }

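As in addDocWithIndex in the TestIndexWriterOnDiskFull hunk above, a single FieldType instance can describe several fields and be reused across documents; only the field name and value change. A small sketch under the same assumptions as the earlier examples, with the IndexWriter wiring omitted:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.FieldType;
    import org.apache.lucene.document.TextField;

    public class SharedFieldTypeSketch {
      public static void main(String[] args) {
        FieldType unstoredText = new FieldType(TextField.TYPE_UNSTORED);
        for (int index = 0; index < 3; index++) {
          Document doc = new Document();
          // The same FieldType backs both fields; only name and value differ.
          doc.add(new Field("content", unstoredText, "aaa " + index));
          doc.add(new Field("id", unstoredText, "" + index));
          // doc would be handed to writer.addDocument(doc) here.
        }
      }
    }
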
Modified: lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterOptimize.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterOptimize.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterOptimize.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterOptimize.java Mon Aug 15 22:03:41 2011
@@ -21,10 +21,7 @@ import java.io.IOException;
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
@@ -37,7 +34,7 @@ public class TestIndexWriterOptimize ext
     MockDirectoryWrapper dir = newDirectory();
 
     final Document doc = new Document();
-    doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS));
+    doc.add(newField("content", "aaa", StringField.TYPE_UNSTORED));
     final int incrMin = TEST_NIGHTLY ? 15 : 40;
     for(int numDocs=10;numDocs<500;numDocs += _TestUtil.nextInt(random, incrMin, 5*incrMin)) {
       LogDocMergePolicy ldmp = new LogDocMergePolicy();
@@ -78,7 +75,7 @@ public class TestIndexWriterOptimize ext
     MockDirectoryWrapper dir = newDirectory();
 
     final Document doc = new Document();
-    doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS));
+    doc.add(newField("content", "aaa", StringField.TYPE_UNSTORED));
 
     LogDocMergePolicy ldmp = new LogDocMergePolicy();
     ldmp.setMinMergeDocs(1);
@@ -183,7 +180,7 @@ public class TestIndexWriterOptimize ext
               setMergePolicy(newLogMergePolicy(51))
       );
       Document doc = new Document();
-      doc.add(newField("field", "aaa", Store.NO, Index.NOT_ANALYZED));
+      doc.add(newField("field", "aaa", StringField.TYPE_UNSTORED));
       for(int i=0;i<100;i++)
         writer.addDocument(doc);
       writer.optimize(false);

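The TestIndexWriterOptimize hunks above map the old Index.NOT_ANALYZED and NOT_ANALYZED_NO_NORMS fields onto StringField.TYPE_UNSTORED, which indexes the whole value as one untokenized term. A minimal sketch follows; whether the StringField types also omit norms exactly as NOT_ANALYZED_NO_NORMS did is not visible from this diff.

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.StringField;

    public class StringFieldSketch {
      public static void main(String[] args) {
        Document doc = new Document();
        // Old: newField("content", "aaa", Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS)
        // New: a StringField type indexes the value as a single, untokenized term.
        doc.add(new Field("content", StringField.TYPE_UNSTORED, "aaa"));
      }
    }
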
Modified: lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java Mon Aug 15 22:03:41 2011
@@ -28,9 +28,9 @@ import java.util.concurrent.atomic.Atomi
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
@@ -146,7 +146,7 @@ public class TestIndexWriterReader exten
     
     Document newDoc = r1.document(10);
     newDoc.removeField("id");
-    newDoc.add(newField("id", Integer.toString(8000), Store.YES, Index.NOT_ANALYZED));
+    newDoc.add(newField("id", Integer.toString(8000), StringField.TYPE_STORED));
     writer.updateDocument(new Term("id", id10), newDoc);
     assertFalse(r1.isCurrent());
 
@@ -170,7 +170,7 @@ public class TestIndexWriterReader exten
 
     writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
     Document doc = new Document();
-    doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("field", "a b c", TextField.TYPE_UNSTORED));
     writer.addDocument(doc);
     assertTrue(r2.isCurrent());
     assertTrue(r3.isCurrent());
@@ -192,14 +192,14 @@ public class TestIndexWriterReader exten
     
     IndexWriter writer = new IndexWriter(dir, iwc);
     Document doc = new Document();
-    doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("field", "a b c", TextField.TYPE_UNSTORED));
     writer.addDocument(doc);
     writer.close();
     
     iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
     writer = new IndexWriter(dir, iwc);
     doc = new Document();
-    doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("field", "a b c", TextField.TYPE_UNSTORED));
     IndexReader nrtReader = writer.getReader();
     assertTrue(nrtReader.isCurrent());
     writer.addDocument(doc);
@@ -580,16 +580,27 @@ public class TestIndexWriterReader exten
   public static Document createDocument(int n, String indexName, int numFields) {
     StringBuilder sb = new StringBuilder();
     Document doc = new Document();
-    doc.add(new Field("id", Integer.toString(n), Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
-    doc.add(new Field("indexname", indexName, Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStored(true);
+    customType.setStoreTermVectors(true);
+    customType.setStoreTermVectorPositions(true);
+    customType.setStoreTermVectorOffsets(true);
+
+    FieldType customType1 = new FieldType(StringField.TYPE_UNSTORED);
+    customType1.setStored(true);
+    customType1.setStoreTermVectors(true);
+    customType1.setStoreTermVectorPositions(true);
+    customType1.setStoreTermVectorOffsets(true);
+    
+    doc.add(new Field("id", customType1, Integer.toString(n)));
+    doc.add(new Field("indexname", customType1, indexName));
     sb.append("a");
     sb.append(n);
-    doc.add(new Field("field1", sb.toString(), Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(new Field("field1", customType, sb.toString()));
     sb.append(" b");
     sb.append(n);
     for (int i = 1; i < numFields; i++) {
-      doc.add(new Field("field" + (i + 1), sb.toString(), Store.YES,
-                        Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+      doc.add(new Field("field" + (i + 1), customType, sb.toString()));
     }
     return doc;
   }
@@ -915,8 +926,8 @@ public class TestIndexWriterReader exten
     Directory dir = newDirectory();
     final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
     Document doc = new Document();
-    doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
-    Field id = newField("id", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
+    doc.add(newField("field", "a b c", TextField.TYPE_UNSTORED));
+    Field id = newField("id", "", StringField.TYPE_UNSTORED);
     doc.add(id);
     id.setValue("0");
     w.addDocument(doc);
@@ -939,8 +950,8 @@ public class TestIndexWriterReader exten
     Directory dir = newDirectory();
     final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
     Document doc = new Document();
-    doc.add(newField("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
-    Field id = newField("id", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
+    doc.add(newField("field", "a b c", TextField.TYPE_UNSTORED));
+    Field id = newField("id", "", StringField.TYPE_UNSTORED);
     doc.add(id);
     id.setValue("0");
     w.addDocument(doc);
@@ -997,7 +1008,9 @@ public class TestIndexWriterReader exten
     );
 
     Document doc = new Document();
-    doc.add(newField("foo", "bar", Field.Store.YES, Field.Index.NOT_ANALYZED));
+    FieldType customType = new FieldType(StringField.TYPE_UNSTORED);
+    customType.setStored(false);
+    doc.add(newField("foo", "bar", customType));
     for(int i=0;i<20;i++) {
       w.addDocument(doc);
     }
@@ -1023,7 +1036,7 @@ public class TestIndexWriterReader exten
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, conf);
     Document doc = new Document();
-    doc.add(new Field("f", "val", Store.NO, Index.ANALYZED));
+    doc.add(new TextField("f", "val"));
     w.addDocument(doc);
     IndexReader r = IndexReader.open(w, true).getSequentialSubReaders()[0];
     try {

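Two further idioms appear in the TestIndexWriterReader hunks above: the TextField(name, value) convenience constructor for an unstored analyzed field, and reusing one Field instance across documents by calling setValue(...) before each addDocument. A minimal sketch under the same assumptions, with the IndexWriter call left as a comment:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.StringField;
    import org.apache.lucene.document.TextField;

    public class FieldReuseSketch {
      public static void main(String[] args) {
        Document doc = new Document();
        doc.add(new TextField("field", "a b c"));            // shorthand for an unstored, tokenized field
        Field id = new Field("id", StringField.TYPE_UNSTORED, "");
        doc.add(id);
        for (int i = 0; i < 3; i++) {
          id.setValue(Integer.toString(i));                  // same Field object, new value per document
          // writer.addDocument(doc) would be called here in the test.
        }
      }
    }
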
Modified: lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java Mon Aug 15 22:03:41 2011
@@ -26,6 +26,9 @@ import java.util.Set;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CharsRef;
@@ -234,10 +237,10 @@ public class TestIndexWriterUnicode exte
     Directory d = newDirectory();
     IndexWriter w = new IndexWriter(d, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
     Document doc = new Document();
-    doc.add(newField("field", "a a\uffffb", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("field", "a a\uffffb", TextField.TYPE_UNSTORED));
     w.addDocument(doc);
     doc = new Document();
-    doc.add(newField("field", "a", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("field", "a", TextField.TYPE_UNSTORED));
     w.addDocument(doc);
     IndexReader r = w.getReader();
     assertEquals(1, r.docFreq(new Term("field", "a\uffffb")));
@@ -252,9 +255,12 @@ public class TestIndexWriterUnicode exte
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new TestIndexWriter.StringSplitAnalyzer()));
     Document doc = new Document();
 
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStored(true);
+    
     final int count = utf8Data.length/2;
     for(int i=0;i<count;i++)
-      doc.add(newField("f" + i, utf8Data[2*i], Field.Store.YES, Field.Index.ANALYZED));
+      doc.add(newField("f" + i, utf8Data[2*i], customType));
     w.addDocument(doc);
     w.close();
 
@@ -276,7 +282,7 @@ public class TestIndexWriterUnicode exte
     RandomIndexWriter writer = new RandomIndexWriter(rnd, dir);
     Document d = new Document();
     // Single segment
-    Field f = newField("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
+    Field f = newField("f", "", StringField.TYPE_UNSTORED);
     d.add(f);
     char[] chars = new char[2];
     final Set<String> allTerms = new HashSet<String>();

Modified: lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java Mon Aug 15 22:03:41 2011
@@ -22,7 +22,8 @@ import java.util.concurrent.CountDownLat
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
@@ -55,7 +56,13 @@ public class TestIndexWriterWithThreads 
     public void run() {
 
       final Document doc = new Document();
-      doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+      customType.setStored(true);
+      customType.setStoreTermVectors(true);
+      customType.setStoreTermVectorPositions(true);
+      customType.setStoreTermVectorOffsets(true);
+      
+      doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", customType));
 
       int idUpto = 0;
       int fullCount = 0;
@@ -291,7 +298,12 @@ public class TestIndexWriterWithThreads 
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))
       .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler()));
     final Document doc = new Document();
-    doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStored(true);
+    customType.setStoreTermVectors(true);
+    customType.setStoreTermVectorPositions(true);
+    customType.setStoreTermVectorOffsets(true);
+    doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", customType));
 
     for(int i=0;i<6;i++)
       writer.addDocument(doc);

Modified: lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIsCurrent.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIsCurrent.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIsCurrent.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestIsCurrent.java Mon Aug 15 22:03:41 2011
@@ -18,8 +18,8 @@ package org.apache.lucene.index;
  */
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.util.*;
 import org.apache.lucene.store.*;
 
@@ -43,7 +43,9 @@ public class TestIsCurrent extends Lucen
 
     // write document
     Document doc = new Document();
-    doc.add(newField("UUID", "1", Store.YES, Index.ANALYZED));
+    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+    customType.setStored(true);
+    doc.add(newField("UUID", "1", customType));
     writer.addDocument(doc);
     writer.commit();
   }

Modified: lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java Mon Aug 15 22:03:41 2011
@@ -25,8 +25,9 @@ import org.apache.lucene.analysis.MockAn
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.codecs.CodecProvider;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.ScoreDoc;
@@ -85,6 +86,9 @@ public class TestLazyProxSkipping extend
                 setMaxBufferedDocs(10).
                 setMergePolicy(newLogMergePolicy(false))
         );
+        FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+        customType.setStored(true);
+        
         for (int i = 0; i < numDocs; i++) {
             Document doc = new Document();
             String content;
@@ -99,7 +103,7 @@ public class TestLazyProxSkipping extend
                 content = this.term3 + " " + this.term2;
             }
 
-            doc.add(newField(this.field, content, Field.Store.YES, Field.Index.ANALYZED));
+            doc.add(newField(this.field, content, customType));
             writer.addDocument(doc);
         }
         
@@ -146,9 +150,11 @@ public class TestLazyProxSkipping extend
     public void testSeek() throws IOException {
         Directory directory = newDirectory();
         IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+        FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+        customType.setStored(true);
         for (int i = 0; i < 10; i++) {
             Document doc = new Document();
-            doc.add(newField(this.field, "a b", Field.Store.YES, Field.Index.ANALYZED));
+            doc.add(newField(this.field, "a b", customType));
             writer.addDocument(doc);
         }
         

Modified: lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestLongPostings.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestLongPostings.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestLongPostings.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/index/TestLongPostings.java Mon Aug 15 22:03:41 2011
@@ -26,7 +26,8 @@ import org.apache.lucene.analysis.TokenS
 import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.codecs.CodecProvider;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
@@ -111,7 +115,7 @@ public class TestLongPostings extends Lu
       for(int idx=0;idx<NUM_DOCS;idx++) {
         final Document doc = new Document();
         String s = isS1.get(idx) ? s1 : s2;
-        final Field f = newField("field", s, Field.Index.ANALYZED);
+        final Field f = newField("field", s, TextField.TYPE_UNSTORED);
         final int count = _TestUtil.nextInt(random, 1, 4);
         for(int ct=0;ct<count;ct++) {
           doc.add(f);