Posted to java-commits@lucene.apache.org by mi...@apache.org on 2008/09/10 23:38:55 UTC

svn commit: r694004 [2/4] - in /lucene/java/trunk: ./ contrib/analyzers/src/test/org/apache/lucene/analysis/query/ contrib/analyzers/src/test/org/apache/lucene/analysis/shingle/ contrib/ant/src/java/org/apache/lucene/ant/ contrib/benchmark/src/java/org...

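This commit is a mechanical rename of the deprecated Field.Index constants across the test sources: TOKENIZED becomes ANALYZED, UN_TOKENIZED becomes NOT_ANALYZED, and NO_NORMS becomes NOT_ANALYZED_NO_NORMS. The rename is cosmetic; indexing behavior is unchanged, the new names just make explicit whether the analyzer runs and whether norms are kept. A minimal sketch of the mapping (field names here are illustrative, not from the commit):

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;

    class FieldIndexRenameSketch {
      static Document makeDoc() {
        Document doc = new Document();
        // Field.Index.TOKENIZED    -> Field.Index.ANALYZED: value is run through the analyzer
        doc.add(new Field("body", "some text", Field.Store.YES, Field.Index.ANALYZED));
        // Field.Index.UN_TOKENIZED -> Field.Index.NOT_ANALYZED: value is indexed as one exact term
        doc.add(new Field("id", "42", Field.Store.YES, Field.Index.NOT_ANALYZED));
        // Field.Index.NO_NORMS     -> Field.Index.NOT_ANALYZED_NO_NORMS: exact term, norms omitted
        doc.add(new Field("tag", "x", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
        return doc;
      }
    }
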
Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java?rev=694004&r1=694003&r2=694004&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java Wed Sep 10 14:38:52 2008
@@ -43,8 +43,8 @@
                                          true, IndexWriter.MaxFieldLength.LIMITED);
 
     Document doc = new Document();
-    doc.add(new Field("partnum", "Q36", Field.Store.YES, Field.Index.UN_TOKENIZED));
-    doc.add(new Field("description", "Illidium Space Modulator", Field.Store.YES, Field.Index.TOKENIZED));
+    doc.add(new Field("partnum", "Q36", Field.Store.YES, Field.Index.NOT_ANALYZED));
+    doc.add(new Field("description", "Illidium Space Modulator", Field.Store.YES, Field.Index.ANALYZED));
     writer.addDocument(doc);
 
     writer.close();
@@ -69,10 +69,10 @@
     RAMDirectory dir = new RAMDirectory();
     IndexWriter writer = new IndexWriter(dir,new KeywordAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
     Document doc = new Document();
-    doc.add(new Field("partnum", "Q36", Field.Store.YES, Field.Index.TOKENIZED));
+    doc.add(new Field("partnum", "Q36", Field.Store.YES, Field.Index.ANALYZED));
     writer.addDocument(doc);
     doc = new Document();
-    doc.add(new Field("partnum", "Q37", Field.Store.YES, Field.Index.TOKENIZED));
+    doc.add(new Field("partnum", "Q37", Field.Store.YES, Field.Index.ANALYZED));
     writer.addDocument(doc);
     writer.close();
 

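The TestKeywordAnalyzer hunk above indexes the part number as a single exact term (NOT_ANALYZED) and the description as analyzed text. A sketch of what NOT_ANALYZED buys at query time (directory setup assumed, not the test's actual code):

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.TermQuery;
    import org.apache.lucene.search.TopDocs;
    import org.apache.lucene.store.Directory;

    class ExactTermSketch {
      // A NOT_ANALYZED field is stored in the index as one unmodified term,
      // so the raw value "Q36" matches a TermQuery directly, regardless of
      // which analyzer the writer used.
      static int countPartnum(Directory dir, String partnum) throws Exception {
        IndexSearcher searcher = new IndexSearcher(dir);
        TopDocs td = searcher.search(new TermQuery(new Term("partnum", partnum)), null, 1);
        searcher.close();
        return td.totalHits;
      }
    }
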
Modified: lucene/java/trunk/src/test/org/apache/lucene/document/TestDocument.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/document/TestDocument.java?rev=694004&r1=694003&r2=694004&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/document/TestDocument.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/document/TestDocument.java Wed Sep 10 14:38:52 2008
@@ -121,7 +121,7 @@
   public void testConstructorExceptions()
   {
     new Field("name", "value", Field.Store.YES, Field.Index.NO);  // okay
-    new Field("name", "value", Field.Store.NO, Field.Index.UN_TOKENIZED);  // okay
+    new Field("name", "value", Field.Store.NO, Field.Index.NOT_ANALYZED);  // okay
     try {
       new Field("name", "value", Field.Store.NO, Field.Index.NO);
       fail();
@@ -177,14 +177,14 @@
     private Document makeDocumentWithFields()
     {
         Document doc = new Document();
-        doc.add(new Field(  "keyword",   "test1", Field.Store.YES, Field.Index.UN_TOKENIZED));
-        doc.add(new Field(  "keyword",   "test2", Field.Store.YES, Field.Index.UN_TOKENIZED));
-        doc.add(new Field(     "text",      "test1", Field.Store.YES, Field.Index.TOKENIZED));
-        doc.add(new Field(     "text",      "test2", Field.Store.YES, Field.Index.TOKENIZED));
+        doc.add(new Field(  "keyword",   "test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
+        doc.add(new Field(  "keyword",   "test2", Field.Store.YES, Field.Index.NOT_ANALYZED));
+        doc.add(new Field(     "text",      "test1", Field.Store.YES, Field.Index.ANALYZED));
+        doc.add(new Field(     "text",      "test2", Field.Store.YES, Field.Index.ANALYZED));
         doc.add(new Field("unindexed", "test1", Field.Store.YES, Field.Index.NO));
         doc.add(new Field("unindexed", "test2", Field.Store.YES, Field.Index.NO));
-        doc.add(new Field( "unstored",  "test1", Field.Store.NO, Field.Index.TOKENIZED));
-        doc.add(new Field( "unstored",  "test2", Field.Store.NO, Field.Index.TOKENIZED));
+        doc.add(new Field( "unstored",  "test1", Field.Store.NO, Field.Index.ANALYZED));
+        doc.add(new Field( "unstored",  "test2", Field.Store.NO, Field.Index.ANALYZED));
         return doc;
     }
 
@@ -222,10 +222,10 @@
 
     public void testFieldSetValue() throws Exception {
 
-      Field field = new Field("id", "id1", Field.Store.YES, Field.Index.UN_TOKENIZED);
+      Field field = new Field("id", "id1", Field.Store.YES, Field.Index.NOT_ANALYZED);
       Document doc = new Document();
       doc.add(field);
-      doc.add(new Field("keyword", "test", Field.Store.YES, Field.Index.UN_TOKENIZED));
+      doc.add(new Field("keyword", "test", Field.Store.YES, Field.Index.NOT_ANALYZED));
 
       RAMDirectory dir = new RAMDirectory();
       IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/DocHelper.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/DocHelper.java?rev=694004&r1=694003&r2=694004&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/DocHelper.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/DocHelper.java Wed Sep 10 14:38:52 2008
@@ -34,35 +34,35 @@
   public static final String FIELD_1_TEXT = "field one text";
   public static final String TEXT_FIELD_1_KEY = "textField1";
   public static Field textField1 = new Field(TEXT_FIELD_1_KEY, FIELD_1_TEXT,
-      Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.NO);
+      Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO);
   
   public static final String FIELD_2_TEXT = "field field field two text";
   //Fields will be lexicographically sorted.  So, the order is: field, text, two
   public static final int [] FIELD_2_FREQS = {3, 1, 1}; 
   public static final String TEXT_FIELD_2_KEY = "textField2";
-  public static Field textField2 = new Field(TEXT_FIELD_2_KEY, FIELD_2_TEXT, Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
+  public static Field textField2 = new Field(TEXT_FIELD_2_KEY, FIELD_2_TEXT, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
   
   public static final String FIELD_2_COMPRESSED_TEXT = "field field field two text";
     //Fields will be lexicographically sorted.  So, the order is: field, text, two
     public static final int [] COMPRESSED_FIELD_2_FREQS = {3, 1, 1}; 
     public static final String COMPRESSED_TEXT_FIELD_2_KEY = "compressedTextField2";
-    public static Field compressedTextField2 = new Field(COMPRESSED_TEXT_FIELD_2_KEY, FIELD_2_COMPRESSED_TEXT, Field.Store.COMPRESS, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
+    public static Field compressedTextField2 = new Field(COMPRESSED_TEXT_FIELD_2_KEY, FIELD_2_COMPRESSED_TEXT, Field.Store.COMPRESS, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
     
 
   public static final String FIELD_3_TEXT = "aaaNoNorms aaaNoNorms bbbNoNorms";
   public static final String TEXT_FIELD_3_KEY = "textField3";
-  public static Field textField3 = new Field(TEXT_FIELD_3_KEY, FIELD_3_TEXT, Field.Store.YES, Field.Index.TOKENIZED);
+  public static Field textField3 = new Field(TEXT_FIELD_3_KEY, FIELD_3_TEXT, Field.Store.YES, Field.Index.ANALYZED);
   static { textField3.setOmitNorms(true); }
 
   public static final String KEYWORD_TEXT = "Keyword";
   public static final String KEYWORD_FIELD_KEY = "keyField";
   public static Field keyField = new Field(KEYWORD_FIELD_KEY, KEYWORD_TEXT,
-      Field.Store.YES, Field.Index.UN_TOKENIZED);
+      Field.Store.YES, Field.Index.NOT_ANALYZED);
 
   public static final String NO_NORMS_TEXT = "omitNormsText";
   public static final String NO_NORMS_KEY = "omitNorms";
   public static Field noNormsField = new Field(NO_NORMS_KEY, NO_NORMS_TEXT,
-      Field.Store.YES, Field.Index.NO_NORMS);
+      Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
 
   public static final String UNINDEXED_FIELD_TEXT = "unindexed field text";
   public static final String UNINDEXED_FIELD_KEY = "unIndField";
@@ -73,12 +73,12 @@
   public static final String UNSTORED_1_FIELD_TEXT = "unstored field text";
   public static final String UNSTORED_FIELD_1_KEY = "unStoredField1";
   public static Field unStoredField1 = new Field(UNSTORED_FIELD_1_KEY, UNSTORED_1_FIELD_TEXT,
-      Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.NO);
+      Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.NO);
 
   public static final String UNSTORED_2_FIELD_TEXT = "unstored field text";
   public static final String UNSTORED_FIELD_2_KEY = "unStoredField2";
   public static Field unStoredField2 = new Field(UNSTORED_FIELD_2_KEY, UNSTORED_2_FIELD_TEXT,
-      Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.YES);
+      Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES);
 
   public static final String LAZY_FIELD_BINARY_KEY = "lazyFieldBinary";
   public static byte [] LAZY_FIELD_BINARY_BYTES;
@@ -86,7 +86,7 @@
   
   public static final String LAZY_FIELD_KEY = "lazyField";
   public static final String LAZY_FIELD_TEXT = "These are some field bytes";
-  public static Field lazyField = new Field(LAZY_FIELD_KEY, LAZY_FIELD_TEXT, Field.Store.YES, Field.Index.TOKENIZED);
+  public static Field lazyField = new Field(LAZY_FIELD_KEY, LAZY_FIELD_TEXT, Field.Store.YES, Field.Index.ANALYZED);
   
   public static final String LARGE_LAZY_FIELD_KEY = "largeLazyField";
   public static String LARGE_LAZY_FIELD_TEXT;
@@ -96,14 +96,14 @@
   public static final String FIELD_UTF1_TEXT = "field one \u4e00text";
   public static final String TEXT_FIELD_UTF1_KEY = "textField1Utf8";
   public static Field textUtfField1 = new Field(TEXT_FIELD_UTF1_KEY, FIELD_UTF1_TEXT,
-      Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.NO);
+      Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO);
 
   public static final String FIELD_UTF2_TEXT = "field field field \u4e00two text";
   //Fields will be lexicographically sorted.  So, the order is: field, text, two
   public static final int [] FIELD_UTF2_FREQS = {3, 1, 1};
   public static final String TEXT_FIELD_UTF2_KEY = "textField2Utf8";
   public static Field textUtfField2 = new Field(TEXT_FIELD_UTF2_KEY, FIELD_UTF2_TEXT, Field.Store.YES, 
-          Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
+          Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
  
   
   
@@ -155,7 +155,7 @@
     lazyFieldBinary = new Field(LAZY_FIELD_BINARY_KEY, LAZY_FIELD_BINARY_BYTES, Field.Store.YES);
     fields[fields.length - 2] = lazyFieldBinary;
     LARGE_LAZY_FIELD_TEXT = buffer.toString();
-    largeLazyField = new Field(LARGE_LAZY_FIELD_KEY, LARGE_LAZY_FIELD_TEXT, Field.Store.YES, Field.Index.TOKENIZED);
+    largeLazyField = new Field(LARGE_LAZY_FIELD_KEY, LARGE_LAZY_FIELD_TEXT, Field.Store.YES, Field.Index.ANALYZED);
     fields[fields.length - 1] = largeLazyField;
     for (int i=0; i<fields.length; i++) {
       Fieldable f = fields[i];

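Besides the straight renames, the DocHelper hunk above covers the norms case: the old Field.Index.NO_NORMS always implied no analysis, which the new NOT_ANALYZED_NO_NORMS name spells out. The same file also shows the setter route for analyzed fields. Both, as a sketch (names and values copied from DocHelper):

    import org.apache.lucene.document.Field;

    class NormsSketch {
      // One constant: exact term, norms omitted.
      static Field viaConstant() {
        return new Field("omitNorms", "omitNormsText",
                         Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
      }
      // Setter route: analyzed text, norms switched off after construction.
      static Field viaSetter() {
        Field f = new Field("textField3", "aaaNoNorms aaaNoNorms bbbNoNorms",
                            Field.Store.YES, Field.Index.ANALYZED);
        f.setOmitNorms(true);
        return f;
      }
    }
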
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java?rev=694004&r1=694003&r2=694004&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java Wed Sep 10 14:38:52 2008
@@ -137,9 +137,9 @@
     // docs, so 10 pending deletes:
     for (int i = 0; i < 20; i++) {
       Document doc = new Document();
-      doc.add(new Field("id", "" + (i % 10), Field.Store.NO, Field.Index.UN_TOKENIZED));
+      doc.add(new Field("id", "" + (i % 10), Field.Store.NO, Field.Index.NOT_ANALYZED));
       doc.add(new Field("content", "bbb " + i, Field.Store.NO,
-                        Field.Index.TOKENIZED));
+                        Field.Index.ANALYZED));
       writer.updateDocument(new Term("id", "" + (i%10)), doc);
     }
     // Deletes one of the 10 added docs, leaving 9:
@@ -172,9 +172,9 @@
     // docs, so 10 pending deletes:
     for (int i = 0; i < 20; i++) {
       Document doc = new Document();
-      doc.add(new Field("id", "" + (i % 10), Field.Store.NO, Field.Index.UN_TOKENIZED));
+      doc.add(new Field("id", "" + (i % 10), Field.Store.NO, Field.Index.NOT_ANALYZED));
       doc.add(new Field("content", "bbb " + i, Field.Store.NO,
-                        Field.Index.TOKENIZED));
+                        Field.Index.ANALYZED));
       writer.updateDocument(new Term("id", "" + (i%10)), doc);
     }
 
@@ -210,9 +210,9 @@
     // docs, so 10 pending deletes:
     for (int i = 0; i < 20; i++) {
       Document doc = new Document();
-      doc.add(new Field("id", "" + (i % 10), Field.Store.NO, Field.Index.UN_TOKENIZED));
+      doc.add(new Field("id", "" + (i % 10), Field.Store.NO, Field.Index.NOT_ANALYZED));
       doc.add(new Field("content", "bbb " + i, Field.Store.NO,
-                        Field.Index.TOKENIZED));
+                        Field.Index.ANALYZED));
       writer.updateDocument(new Term("id", "" + (i%10)), doc);
     }
 
@@ -434,7 +434,7 @@
     for (int i = 0; i < numDocs; i++) {
       Document doc = new Document();
       doc.add(new Field("content", "aaa", Field.Store.NO,
-                        Field.Index.TOKENIZED));
+                        Field.Index.ANALYZED));
       writer.addDocument(doc);
     }
   }
@@ -443,7 +443,7 @@
     for (int i = 0; i < numDocs; i++) {
       Document doc = new Document();
       doc.add(new Field("content", "bbb", Field.Store.NO,
-                        Field.Index.TOKENIZED));
+                        Field.Index.ANALYZED));
       writer.addDocument(doc);
     }
   }
@@ -507,7 +507,7 @@
 
     Document doc = new Document();
     doc.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
-                      Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+                      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
     for(int i=0;i<60;i++)
       writer.addDocument(doc);
     writer.setMaxBufferedDocs(200);

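A pattern repeated in the TestAddIndexesNoOptimize hunks above (and in TestAtomicUpdate below): updateDocument deletes by exact term and then adds, so the "id" field must be NOT_ANALYZED for the Term to match the indexed value exactly. The idiom, as a sketch:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.Term;

    class UpdateByIdSketch {
      // Delete-then-add keyed on an exact, unanalyzed id term; an ANALYZED
      // id could be split or lowercased and the delete would silently miss.
      static void upsert(IndexWriter writer, String id, String body) throws Exception {
        Document doc = new Document();
        doc.add(new Field("id", id, Field.Store.NO, Field.Index.NOT_ANALYZED));
        doc.add(new Field("content", body, Field.Store.NO, Field.Index.ANALYZED));
        writer.updateDocument(new Term("id", id), doc);
      }
    }
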
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestAtomicUpdate.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestAtomicUpdate.java?rev=694004&r1=694003&r2=694004&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestAtomicUpdate.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestAtomicUpdate.java Wed Sep 10 14:38:52 2008
@@ -95,8 +95,8 @@
       // Update all 100 docs...
       for(int i=0; i<100; i++) {
         Document d = new Document();
-        d.add(new Field("id", Integer.toString(i), Field.Store.YES, Field.Index.UN_TOKENIZED));
-        d.add(new Field("contents", English.intToEnglish(i+10*count), Field.Store.NO, Field.Index.TOKENIZED));
+        d.add(new Field("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
+        d.add(new Field("contents", English.intToEnglish(i+10*count), Field.Store.NO, Field.Index.ANALYZED));
         writer.updateDocument(new Term("id", Integer.toString(i)), d);
       }
     }
@@ -132,8 +132,8 @@
     // Establish a base index of 100 docs:
     for(int i=0;i<100;i++) {
       Document d = new Document();
-      d.add(new Field("id", Integer.toString(i), Field.Store.YES, Field.Index.UN_TOKENIZED));
-      d.add(new Field("contents", English.intToEnglish(i), Field.Store.NO, Field.Index.TOKENIZED));
+      d.add(new Field("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
+      d.add(new Field("contents", English.intToEnglish(i), Field.Store.NO, Field.Index.ANALYZED));
       writer.addDocument(d);
     }
     writer.commit();

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java?rev=694004&r1=694003&r2=694004&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java Wed Sep 10 14:38:52 2008
@@ -474,11 +474,11 @@
   private void addDoc(IndexWriter writer, int id) throws IOException
   {
     Document doc = new Document();
-    doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.TOKENIZED));
-    doc.add(new Field("id", Integer.toString(id), Field.Store.YES, Field.Index.UN_TOKENIZED));
-    doc.add(new Field("autf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-    doc.add(new Field("utf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-    doc.add(new Field("content2", "here is more content with aaa aaa aaa", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(new Field("id", Integer.toString(id), Field.Store.YES, Field.Index.NOT_ANALYZED));
+    doc.add(new Field("autf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(new Field("utf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(new Field("content2", "here is more content with aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
     writer.addDocument(doc);
   }
 

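The TestBackwardsCompatibility strings above are deliberately awkward UTF-8: a NUL (\u0000), a BMP symbol (\u2620), and supplementary characters written as surrogate pairs (\uD834\uDD1E is U+1D11E, the musical G clef). A small self-contained check of how such a pair round-trips (standard java.lang behavior, not Lucene code):

    import java.io.UnsupportedEncodingException;

    class SurrogateSketch {
      public static void main(String[] args) throws UnsupportedEncodingException {
        String clef = "\uD834\uDD1E";                   // one code point, two Java chars
        System.out.println(clef.length());              // 2
        System.out.println(clef.codePointCount(0, 2));  // 1
        byte[] utf8 = clef.getBytes("UTF-8");
        System.out.println(utf8.length);                // 4 (F0 9D 84 9E)
      }
    }
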
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestCheckIndex.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestCheckIndex.java?rev=694004&r1=694003&r2=694004&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestCheckIndex.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestCheckIndex.java Wed Sep 10 14:38:52 2008
@@ -37,7 +37,7 @@
                                           IndexWriter.MaxFieldLength.LIMITED);      
     writer.setMaxBufferedDocs(2);
     Document doc = new Document();
-    doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
     for(int i=0;i<19;i++) {
       writer.addDocument(doc);
     }

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java?rev=694004&r1=694003&r2=694004&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java Wed Sep 10 14:38:52 2008
@@ -68,7 +68,7 @@
     writer.setMergeScheduler(cms);
     writer.setMaxBufferedDocs(2);
     Document doc = new Document();
-    Field idField = new Field("id", "", Field.Store.YES, Field.Index.UN_TOKENIZED);
+    Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
     doc.add(idField);
     for(int i=0;i<10;i++) {
       for(int j=0;j<20;j++) {
@@ -113,7 +113,7 @@
     mp.setMinMergeDocs(1000);
 
     Document doc = new Document();
-    Field idField = new Field("id", "", Field.Store.YES, Field.Index.UN_TOKENIZED);
+    Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
     doc.add(idField);
     for(int i=0;i<10;i++) {
       for(int j=0;j<100;j++) {
@@ -154,7 +154,7 @@
 
         for(int j=0;j<21;j++) {
           Document doc = new Document();
-          doc.add(new Field("content", "a b c", Field.Store.NO, Field.Index.TOKENIZED));
+          doc.add(new Field("content", "a b c", Field.Store.NO, Field.Index.ANALYZED));
           writer.addDocument(doc);
         }
         
@@ -175,7 +175,7 @@
     RAMDirectory directory = new MockRAMDirectory();
 
     Document doc = new Document();
-    Field idField = new Field("id", "", Field.Store.YES, Field.Index.UN_TOKENIZED);
+    Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
     doc.add(idField);
 
     for(int pass=0;pass<2;pass++) {

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestCrash.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestCrash.java?rev=694004&r1=694003&r2=694004&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestCrash.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestCrash.java Wed Sep 10 14:38:52 2008
@@ -41,8 +41,8 @@
     ((ConcurrentMergeScheduler) writer.getMergeScheduler()).setSuppressExceptions();
 
     Document doc = new Document();
-    doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.TOKENIZED));
-    doc.add(new Field("id", "0", Field.Store.YES, Field.Index.TOKENIZED));
+    doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
+    doc.add(new Field("id", "0", Field.Store.YES, Field.Index.ANALYZED));
     for(int i=0;i<157;i++)
       writer.addDocument(doc);
 

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java?rev=694004&r1=694003&r2=694004&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java Wed Sep 10 14:38:52 2008
@@ -654,7 +654,7 @@
   private void addDoc(IndexWriter writer) throws IOException
   {
     Document doc = new Document();
-    doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.TOKENIZED));
+    doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
     writer.addDocument(doc);
   }
 }

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestDoc.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestDoc.java?rev=694004&r1=694003&r2=694004&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestDoc.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestDoc.java Wed Sep 10 14:38:52 2008
@@ -25,6 +25,7 @@
 import org.apache.lucene.store.FSDirectory;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
 import org.apache.lucene.demo.FileDocument;
 
 import java.io.*;
@@ -159,7 +160,6 @@
       assertEquals(multiFileOutput, singleFileOutput);
    }
 
-
    private SegmentInfo indexDoc(IndexWriter writer, String fileName)
    throws Exception
    {

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestDocumentWriter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestDocumentWriter.java?rev=694004&r1=694003&r2=694004&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestDocumentWriter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestDocumentWriter.java Wed Sep 10 14:38:52 2008
@@ -116,8 +116,8 @@
     IndexWriter writer = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
 
     Document doc = new Document();
-    doc.add(new Field("repeated", "repeated one", Field.Store.YES, Field.Index.TOKENIZED));
-    doc.add(new Field("repeated", "repeated two", Field.Store.YES, Field.Index.TOKENIZED));
+    doc.add(new Field("repeated", "repeated one", Field.Store.YES, Field.Index.ANALYZED));
+    doc.add(new Field("repeated", "repeated two", Field.Store.YES, Field.Index.ANALYZED));
 
     writer.addDocument(doc);
     writer.flush();
@@ -172,7 +172,7 @@
     IndexWriter writer = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
 
     Document doc = new Document();
-    doc.add(new Field("f1", "a 5 a a", Field.Store.YES, Field.Index.TOKENIZED));
+    doc.add(new Field("f1", "a 5 a a", Field.Store.YES, Field.Index.ANALYZED));
 
     writer.addDocument(doc);
     writer.flush();
@@ -243,11 +243,11 @@
   public void testMixedTermVectorSettingsSameField() throws Exception {
     Document doc = new Document();
     // f1 first without tv then with tv
-    doc.add(new Field("f1", "v1", Store.YES, Index.UN_TOKENIZED, TermVector.NO));
-    doc.add(new Field("f1", "v2", Store.YES, Index.UN_TOKENIZED, TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(new Field("f1", "v1", Store.YES, Index.NOT_ANALYZED, TermVector.NO));
+    doc.add(new Field("f1", "v2", Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
     // f2 first with tv then without tv
-    doc.add(new Field("f2", "v1", Store.YES, Index.UN_TOKENIZED, TermVector.WITH_POSITIONS_OFFSETS));
-    doc.add(new Field("f2", "v2", Store.YES, Index.UN_TOKENIZED, TermVector.NO));
+    doc.add(new Field("f2", "v1", Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(new Field("f2", "v2", Store.YES, Index.NOT_ANALYZED, TermVector.NO));
 
     RAMDirectory ram = new RAMDirectory();
     IndexWriter writer = new IndexWriter(ram, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestFilterIndexReader.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestFilterIndexReader.java?rev=694004&r1=694003&r2=694004&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestFilterIndexReader.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestFilterIndexReader.java Wed Sep 10 14:38:52 2008
@@ -97,15 +97,15 @@
                                          IndexWriter.MaxFieldLength.LIMITED);
 
     Document d1 = new Document();
-    d1.add(new Field("default","one two", Field.Store.YES, Field.Index.TOKENIZED));
+    d1.add(new Field("default","one two", Field.Store.YES, Field.Index.ANALYZED));
     writer.addDocument(d1);
 
     Document d2 = new Document();
-    d2.add(new Field("default","one three", Field.Store.YES, Field.Index.TOKENIZED));
+    d2.add(new Field("default","one three", Field.Store.YES, Field.Index.ANALYZED));
     writer.addDocument(d2);
 
     Document d3 = new Document();
-    d3.add(new Field("default","two four", Field.Store.YES, Field.Index.TOKENIZED));
+    d3.add(new Field("default","two four", Field.Store.YES, Field.Index.ANALYZED));
     writer.addDocument(d3);
 
     writer.close();

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexFileDeleter.java?rev=694004&r1=694003&r2=694004&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexFileDeleter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexFileDeleter.java Wed Sep 10 14:38:52 2008
@@ -189,8 +189,8 @@
   private void addDoc(IndexWriter writer, int id) throws IOException
   {
     Document doc = new Document();
-    doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.TOKENIZED));
-    doc.add(new Field("id", Integer.toString(id), Field.Store.YES, Field.Index.UN_TOKENIZED));
+    doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(new Field("id", Integer.toString(id), Field.Store.YES, Field.Index.NOT_ANALYZED));
     writer.addDocument(doc);
   }
 }

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexModifier.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexModifier.java?rev=694004&r1=694003&r2=694004&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexModifier.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexModifier.java Wed Sep 10 14:38:52 2008
@@ -127,8 +127,8 @@
   
   private Document getDoc() {
     Document doc = new Document();
-    doc.add(new Field("body", Integer.toString(docCount), Field.Store.YES, Field.Index.UN_TOKENIZED));
-    doc.add(new Field("all", "x", Field.Store.YES, Field.Index.UN_TOKENIZED));
+    doc.add(new Field("body", Integer.toString(docCount), Field.Store.YES, Field.Index.NOT_ANALYZED));
+    doc.add(new Field("all", "x", Field.Store.YES, Field.Index.NOT_ANALYZED));
     docCount++;
     return doc;
   }
@@ -272,15 +272,15 @@
     Document doc = new Document();
     synchronized (getClass()) {
       doc.add(new Field("id", Integer.toString(id), Field.Store.YES,
-          Field.Index.UN_TOKENIZED));
+          Field.Index.NOT_ANALYZED));
       id++;
     }
     // add random stuff:
     doc.add(new Field("content", Integer.toString(random.nextInt(1000)), Field.Store.YES,
-        Field.Index.TOKENIZED));
+        Field.Index.ANALYZED));
     doc.add(new Field("content", Integer.toString(random.nextInt(1000)), Field.Store.YES,
-        Field.Index.TOKENIZED));
-    doc.add(new Field("all", "x", Field.Store.YES, Field.Index.TOKENIZED));
+        Field.Index.ANALYZED));
+    doc.add(new Field("all", "x", Field.Store.YES, Field.Index.ANALYZED));
     return doc;
   }
   

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReader.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReader.java?rev=694004&r1=694003&r2=694004&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReader.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReader.java Wed Sep 10 14:38:52 2008
@@ -190,11 +190,11 @@
     // new termvector fields
     for (int i = 0; i < 5 * writer.getMergeFactor(); i++) {
       Document doc = new Document();
-        doc.add(new Field("tvnot","one two two three three three", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.NO));
-        doc.add(new Field("termvector","one two two three three three", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.YES));
-        doc.add(new Field("tvoffset","one two two three three three", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_OFFSETS));
-        doc.add(new Field("tvposition","one two two three three three", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS));
-        doc.add(new Field("tvpositionoffset","one two two three three three", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+        doc.add(new Field("tvnot","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
+        doc.add(new Field("termvector","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES));
+        doc.add(new Field("tvoffset","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_OFFSETS));
+        doc.add(new Field("tvposition","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
+        doc.add(new Field("tvpositionoffset","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
 
         writer.addDocument(doc);
     }
@@ -773,8 +773,8 @@
       IndexWriter writer = new IndexWriter(startDir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
       for(int i=0;i<157;i++) {
         Document d = new Document();
-        d.add(new Field("id", Integer.toString(i), Field.Store.YES, Field.Index.UN_TOKENIZED));
-        d.add(new Field("content", "aaa " + i, Field.Store.NO, Field.Index.TOKENIZED));
+        d.add(new Field("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
+        d.add(new Field("content", "aaa " + i, Field.Store.NO, Field.Index.ANALYZED));
         writer.addDocument(d);
       }
       writer.close();
@@ -1170,31 +1170,31 @@
     private void addDocumentWithFields(IndexWriter writer) throws IOException
     {
         Document doc = new Document();
-        doc.add(new Field("keyword","test1", Field.Store.YES, Field.Index.UN_TOKENIZED));
-        doc.add(new Field("text","test1", Field.Store.YES, Field.Index.TOKENIZED));
+        doc.add(new Field("keyword","test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
+        doc.add(new Field("text","test1", Field.Store.YES, Field.Index.ANALYZED));
         doc.add(new Field("unindexed","test1", Field.Store.YES, Field.Index.NO));
-        doc.add(new Field("unstored","test1", Field.Store.NO, Field.Index.TOKENIZED));
+        doc.add(new Field("unstored","test1", Field.Store.NO, Field.Index.ANALYZED));
         writer.addDocument(doc);
     }
 
     private void addDocumentWithDifferentFields(IndexWriter writer) throws IOException
     {
         Document doc = new Document();
-        doc.add(new Field("keyword2","test1", Field.Store.YES, Field.Index.UN_TOKENIZED));
-        doc.add(new Field("text2","test1", Field.Store.YES, Field.Index.TOKENIZED));
+        doc.add(new Field("keyword2","test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
+        doc.add(new Field("text2","test1", Field.Store.YES, Field.Index.ANALYZED));
         doc.add(new Field("unindexed2","test1", Field.Store.YES, Field.Index.NO));
-        doc.add(new Field("unstored2","test1", Field.Store.NO, Field.Index.TOKENIZED));
+        doc.add(new Field("unstored2","test1", Field.Store.NO, Field.Index.ANALYZED));
         writer.addDocument(doc);
     }
 
     private void addDocumentWithTermVectorFields(IndexWriter writer) throws IOException
     {
         Document doc = new Document();
-        doc.add(new Field("tvnot","tvnot", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.NO));
-        doc.add(new Field("termvector","termvector", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.YES));
-        doc.add(new Field("tvoffset","tvoffset", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_OFFSETS));
-        doc.add(new Field("tvposition","tvposition", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS));
-        doc.add(new Field("tvpositionoffset","tvpositionoffset", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+        doc.add(new Field("tvnot","tvnot", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
+        doc.add(new Field("termvector","termvector", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES));
+        doc.add(new Field("tvoffset","tvoffset", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_OFFSETS));
+        doc.add(new Field("tvposition","tvposition", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
+        doc.add(new Field("tvpositionoffset","tvpositionoffset", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
         
         writer.addDocument(doc);
     }
@@ -1202,7 +1202,7 @@
     private void addDoc(IndexWriter writer, String value) throws IOException
     {
         Document doc = new Document();
-        doc.add(new Field("content", value, Field.Store.NO, Field.Index.TOKENIZED));
+        doc.add(new Field("content", value, Field.Store.NO, Field.Index.ANALYZED));
         writer.addDocument(doc);
     }
     private void rmDir(File dir) {

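The TestIndexReader hunks above enumerate every Field.TermVector option alongside the renamed ANALYZED constant. A compact restatement of the five modes with what each stores (field names match the test):

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;

    class TermVectorSketch {
      static Document allModes(String text) {
        Document doc = new Document();
        doc.add(new Field("tvnot", text, Field.Store.YES, Field.Index.ANALYZED,
                          Field.TermVector.NO));                      // no term vector stored
        doc.add(new Field("termvector", text, Field.Store.YES, Field.Index.ANALYZED,
                          Field.TermVector.YES));                     // terms + frequencies
        doc.add(new Field("tvoffset", text, Field.Store.YES, Field.Index.ANALYZED,
                          Field.TermVector.WITH_OFFSETS));            // + character offsets
        doc.add(new Field("tvposition", text, Field.Store.YES, Field.Index.ANALYZED,
                          Field.TermVector.WITH_POSITIONS));          // + token positions
        doc.add(new Field("tvpositionoffset", text, Field.Store.YES, Field.Index.ANALYZED,
                          Field.TermVector.WITH_POSITIONS_OFFSETS));  // + positions and offsets
        return doc;
      }
    }
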
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReaderReopen.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReaderReopen.java?rev=694004&r1=694003&r2=694004&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReaderReopen.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReaderReopen.java Wed Sep 10 14:38:52 2008
@@ -157,7 +157,7 @@
       for (int i=0; i<4; i++) {
         for (int j=0; j<M; j++) {
           Document doc = new Document();
-          doc.add(new Field("id", i+"_"+j, Store.YES, Index.UN_TOKENIZED));
+          doc.add(new Field("id", i+"_"+j, Store.YES, Index.NOT_ANALYZED));
           iwriter.addDocument(doc);
           if (i>0) {
             int k = i-1;
@@ -884,11 +884,11 @@
     Document doc = new Document();
     sb.append("a");
     sb.append(n);
-    doc.add(new Field("field1", sb.toString(), Store.YES, Index.TOKENIZED));
+    doc.add(new Field("field1", sb.toString(), Store.YES, Index.ANALYZED));
     sb.append(" b");
     sb.append(n);
     for (int i = 1; i < numFields; i++) {
-      doc.add(new Field("field" + (i+1), sb.toString(), Store.YES, Index.TOKENIZED));
+      doc.add(new Field("field" + (i+1), sb.toString(), Store.YES, Index.ANALYZED));
     }
     return doc;
   }

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java?rev=694004&r1=694003&r2=694004&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java Wed Sep 10 14:38:52 2008
@@ -129,15 +129,15 @@
     private static void addDoc(IndexWriter writer) throws IOException
     {
         Document doc = new Document();
-        doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.TOKENIZED));
+        doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
         writer.addDocument(doc);
     }
 
     private void addDocWithIndex(IndexWriter writer, int index) throws IOException
     {
         Document doc = new Document();
-        doc.add(new Field("content", "aaa " + index, Field.Store.YES, Field.Index.TOKENIZED));
-        doc.add(new Field("id", "" + index, Field.Store.YES, Field.Index.TOKENIZED));
+        doc.add(new Field("content", "aaa " + index, Field.Store.YES, Field.Index.ANALYZED));
+        doc.add(new Field("id", "" + index, Field.Store.YES, Field.Index.ANALYZED));
         writer.addDocument(doc);
     }
 
@@ -566,12 +566,12 @@
       // Max length term is 16383, so this contents produces
       // a too-long term:
       String contents = "abc xyz x" + bigTerm + " another term";
-      doc.add(new Field("content", contents, Field.Store.NO, Field.Index.TOKENIZED));
+      doc.add(new Field("content", contents, Field.Store.NO, Field.Index.ANALYZED));
       writer.addDocument(doc);
 
       // Make sure we can add another normal document
       doc = new Document();
-      doc.add(new Field("content", "abc bbb ccc", Field.Store.NO, Field.Index.TOKENIZED));
+      doc.add(new Field("content", "abc bbb ccc", Field.Store.NO, Field.Index.ANALYZED));
       writer.addDocument(doc);
       writer.close();
 
@@ -599,7 +599,7 @@
       // Make sure we can add a document with exactly the
       // maximum length term, and search on that term:
       doc = new Document();
-      doc.add(new Field("content", bigTerm, Field.Store.NO, Field.Index.TOKENIZED));
+      doc.add(new Field("content", bigTerm, Field.Store.NO, Field.Index.ANALYZED));
       StandardAnalyzer sa = new StandardAnalyzer();
       sa.setMaxTokenLength(100000);
       writer  = new IndexWriter(dir, sa, IndexWriter.MaxFieldLength.LIMITED);
@@ -617,7 +617,7 @@
       MockRAMDirectory dir = new MockRAMDirectory();
 
       final Document doc = new Document();
-      doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.TOKENIZED));
+      doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
 
       for(int numDocs=38;numDocs<500;numDocs += 38) {
         IndexWriter writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
@@ -655,7 +655,7 @@
       MockRAMDirectory dir = new MockRAMDirectory();
 
       final Document doc = new Document();
-      doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.TOKENIZED));
+      doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
 
       IndexWriter writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
       LogDocMergePolicy ldmp = new LogDocMergePolicy();
@@ -1245,12 +1245,12 @@
       writer.setMaxBufferedDocs(10);
       for(int j=0;j<100;j++) {
         Document doc = new Document();
-        doc.add(new Field("a"+j, "aaa" + j, Field.Store.YES, Field.Index.TOKENIZED));
-        doc.add(new Field("b"+j, "aaa" + j, Field.Store.YES, Field.Index.TOKENIZED));
-        doc.add(new Field("c"+j, "aaa" + j, Field.Store.YES, Field.Index.TOKENIZED));
-        doc.add(new Field("d"+j, "aaa", Field.Store.YES, Field.Index.TOKENIZED));
-        doc.add(new Field("e"+j, "aaa", Field.Store.YES, Field.Index.TOKENIZED));
-        doc.add(new Field("f"+j, "aaa", Field.Store.YES, Field.Index.TOKENIZED));
+        doc.add(new Field("a"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
+        doc.add(new Field("b"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
+        doc.add(new Field("c"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
+        doc.add(new Field("d"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
+        doc.add(new Field("e"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
+        doc.add(new Field("f"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
         writer.addDocument(doc);
       }
       writer.close();
@@ -1277,7 +1277,7 @@
       int lastNumFile = dir.list().length;
       for(int j=0;j<9;j++) {
         Document doc = new Document();
-        doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.TOKENIZED));
+        doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
         writer.addDocument(doc);
         int numFile = dir.list().length;
         // Verify that with a tiny RAM buffer we see new
@@ -1300,7 +1300,7 @@
       int lastFlushCount = -1;
       for(int j=1;j<52;j++) {
         Document doc = new Document();
-        doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.TOKENIZED));
+        doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
         writer.addDocument(doc);
         _TestUtil.syncConcurrentMerges(writer);
         int flushCount = writer.getFlushCount();
@@ -1354,7 +1354,7 @@
 
       for(int j=1;j<52;j++) {
         Document doc = new Document();
-        doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.TOKENIZED));
+        doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
         writer.addDocument(doc);
       }
       
@@ -1416,7 +1416,7 @@
         for(int j=0;j<100;j++) {
           Document doc = new Document();
           for(int k=0;k<100;k++) {
-            doc.add(new Field("field", Integer.toString(rand.nextInt()), Field.Store.YES, Field.Index.TOKENIZED));
+            doc.add(new Field("field", Integer.toString(rand.nextInt()), Field.Store.YES, Field.Index.ANALYZED));
           }
           writer.addDocument(doc);
         }
@@ -1425,7 +1425,7 @@
         // occurs (heavy on byte blocks)
         for(int j=0;j<100;j++) {
           Document doc = new Document();
-          doc.add(new Field("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", Field.Store.YES, Field.Index.TOKENIZED));
+          doc.add(new Field("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED));
           writer.addDocument(doc);
         }
 
@@ -1440,7 +1440,7 @@
           String longTerm = b.toString();
 
           Document doc = new Document();
-          doc.add(new Field("field", longTerm, Field.Store.YES, Field.Index.TOKENIZED));
+          doc.add(new Field("field", longTerm, Field.Store.YES, Field.Index.ANALYZED));
           writer.addDocument(doc);
         }
       }
@@ -1461,7 +1461,7 @@
       // Enable norms for only 1 doc, pre flush
       for(int j=0;j<10;j++) {
         Document doc = new Document();
-        Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.TOKENIZED); 
+        Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED); 
         if (j != 8) {
           f.setOmitNorms(true);
         }
@@ -1482,7 +1482,7 @@
       // Enable norms for only 1 doc, post flush
       for(int j=0;j<27;j++) {
         Document doc = new Document();
-        Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.TOKENIZED); 
+        Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED); 
         if (j != 26) {
           f.setOmitNorms(true);
         }
@@ -1514,7 +1514,7 @@
         b.append(" a a a a a a a a");
       }
       Document doc = new Document();
-      doc.add(new Field("field", b.toString(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      doc.add(new Field("field", b.toString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
       writer.addDocument(doc);
       writer.close();
 
@@ -1571,7 +1571,7 @@
       IndexWriter writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
       writer.setMaxBufferedDocs(2);
       Document doc = new Document();
-      doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
       for(int i=0;i<19;i++)
         writer.addDocument(doc);
       writer.flush(false, true, true);
@@ -1589,7 +1589,7 @@
       Directory dir = new RAMDirectory();
       IndexWriter writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
       Document doc = new Document();
-      doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
       writer.addDocument(doc);
       writer.flush();
       writer.addDocument(new Document());
@@ -1609,7 +1609,7 @@
         IndexWriter writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
         writer.setMergeScheduler(new ConcurrentMergeScheduler());
         Document doc = new Document();
-        doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+        doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
         writer.setMaxBufferedDocs(2);
         writer.setMergeFactor(101);
         for(int i=0;i<200;i++)
@@ -1663,7 +1663,7 @@
     IndexWriter ir = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
     
     Document document = new Document();
-    document.add(new Field("tvtest", "", Field.Store.NO, Field.Index.TOKENIZED,
+    document.add(new Field("tvtest", "", Field.Store.NO, Field.Index.ANALYZED,
         Field.TermVector.YES));
     ir.addDocument(document);
     ir.close();
@@ -1675,17 +1675,17 @@
     MockRAMDirectory dir = new MockRAMDirectory();
     IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
     Document document = new Document();
-    document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.TOKENIZED,
+    document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
         Field.TermVector.YES));
     iw.addDocument(document);
     document = new Document();
-    document.add(new Field("tvtest", "x y z", Field.Store.NO, Field.Index.TOKENIZED,
+    document.add(new Field("tvtest", "x y z", Field.Store.NO, Field.Index.ANALYZED,
                            Field.TermVector.NO));
     iw.addDocument(document);
     // Make first segment
     iw.flush();
 
-    document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.TOKENIZED,
+    document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
         Field.TermVector.YES));
     iw.addDocument(document);
     // Make 2nd segment
@@ -1701,13 +1701,13 @@
     MockRAMDirectory dir = new MockRAMDirectory();
     IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
     Document document = new Document();
-    document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.TOKENIZED,
+    document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
         Field.TermVector.YES));
     iw.addDocument(document);
     iw.flush();
 
     document = new Document();
-    document.add(new Field("tvtest", "x y z", Field.Store.NO, Field.Index.TOKENIZED,
+    document.add(new Field("tvtest", "x y z", Field.Store.NO, Field.Index.ANALYZED,
                            Field.TermVector.NO));
     iw.addDocument(document);
     // Make first segment
@@ -1715,7 +1715,7 @@
 
     iw.optimize();
 
-    document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.TOKENIZED,
+    document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
         Field.TermVector.YES));
     iw.addDocument(document);
     // Make 2nd segment
@@ -1733,7 +1733,7 @@
       MockRAMDirectory dir = new MockRAMDirectory();
       IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
       Document document = new Document();
-      document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.TOKENIZED,
+      document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
                              Field.TermVector.YES));
       iw.setMaxBufferedDocs(2);
       iw.setMergeFactor(2);
@@ -1775,7 +1775,7 @@
     iw.setMaxBufferedDocs(2);
     iw.setMergeFactor(2);
     Document document = new Document();
-    document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.TOKENIZED,
+    document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
                            Field.TermVector.YES));
     for(int i=0;i<177;i++)
       iw.addDocument(document);
@@ -1805,7 +1805,7 @@
     Document doc = new Document();
     String contents = "aa bb cc dd ee ff gg hh ii jj kk";
     doc.add(new Field("content", contents, Field.Store.NO,
-        Field.Index.TOKENIZED));
+        Field.Index.ANALYZED));
     try {
       writer.addDocument(doc);
       fail("did not hit expected exception");
@@ -1815,13 +1815,13 @@
     // Make sure we can add another normal document
     doc = new Document();
     doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
-        Field.Index.TOKENIZED));
+        Field.Index.ANALYZED));
     writer.addDocument(doc);
 
     // Make sure we can add another normal document
     doc = new Document();
     doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
-        Field.Index.TOKENIZED));
+        Field.Index.ANALYZED));
     writer.addDocument(doc);
 
     writer.close();
@@ -1887,7 +1887,7 @@
     Document doc = new Document();
     String contents = "aa bb cc dd ee ff gg hh ii jj kk";
     doc.add(new Field("content", contents, Field.Store.NO,
-        Field.Index.TOKENIZED));
+        Field.Index.ANALYZED));
     boolean hitError = false;
     for(int i=0;i<200;i++) {
       try {
@@ -1939,13 +1939,13 @@
       //writer.setInfoStream(System.out);
       Document doc = new Document();
       doc.add(new Field("contents", "here are some contents", Field.Store.YES,
-                        Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
       writer.addDocument(doc);
       writer.addDocument(doc);
       doc.add(new Field("crash", "this should crash after 4 terms", Field.Store.YES,
-                        Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
       doc.add(new Field("other", "this will not get indexed", Field.Store.YES,
-                        Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
       try {
         writer.addDocument(doc);
         fail("did not hit expected exception");
@@ -1955,7 +1955,7 @@
       if (0 == i) {
         doc = new Document();
         doc.add(new Field("contents", "here are some contents", Field.Store.YES,
-                          Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+                          Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
         writer.addDocument(doc);
         writer.addDocument(doc);
       }
@@ -1982,7 +1982,7 @@
       writer.setMaxBufferedDocs(10);
       doc = new Document();
       doc.add(new Field("contents", "here are some contents", Field.Store.YES,
-                        Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
       for(int j=0;j<17;j++)
         writer.addDocument(doc);
       writer.optimize();
@@ -2034,13 +2034,13 @@
                   for(int iter=0;iter<NUM_ITER;iter++) {
                     Document doc = new Document();
                     doc.add(new Field("contents", "here are some contents", Field.Store.YES,
-                                      Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+                                      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
                     writer.addDocument(doc);
                     writer.addDocument(doc);
                     doc.add(new Field("crash", "this should crash after 4 terms", Field.Store.YES,
-                                      Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+                                      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
                     doc.add(new Field("other", "this will not get indexed", Field.Store.YES,
-                                      Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+                                      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
                     try {
                       writer.addDocument(doc);
                       fail("did not hit expected exception");
@@ -2050,7 +2050,7 @@
                     if (0 == finalI) {
                       doc = new Document();
                       doc.add(new Field("contents", "here are some contents", Field.Store.YES,
-                                        Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+                                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
                       writer.addDocument(doc);
                       writer.addDocument(doc);
                     }
@@ -2100,7 +2100,7 @@
       writer.setMaxBufferedDocs(10);
       Document doc = new Document();
       doc.add(new Field("contents", "here are some contents", Field.Store.YES,
-                        Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+                        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
       for(int j=0;j<17;j++)
         writer.addDocument(doc);
       writer.optimize();
@@ -2140,21 +2140,21 @@
       if (i == 7) {
         // Add empty docs here
         doc.add(new Field("content3", "", Field.Store.NO,
-                          Field.Index.TOKENIZED));
+                          Field.Index.ANALYZED));
       } else {
         Field.Store storeVal;
         if (i%2 == 0) {
           doc.add(new Field("content4", contents, Field.Store.YES,
-                            Field.Index.TOKENIZED));
+                            Field.Index.ANALYZED));
           storeVal = Field.Store.YES;
         } else
           storeVal = Field.Store.NO;
         doc.add(new Field("content1", contents, storeVal,
-                          Field.Index.TOKENIZED));
+                          Field.Index.ANALYZED));
         doc.add(new Field("content3", "", Field.Store.YES,
-                          Field.Index.TOKENIZED));
+                          Field.Index.ANALYZED));
         doc.add(new Field("content5", "", storeVal,
-                          Field.Index.TOKENIZED));
+                          Field.Index.ANALYZED));
       }
 
       for(int j=0;j<4;j++)
@@ -2178,7 +2178,7 @@
     RAMDirectory directory = new MockRAMDirectory();
 
     final Document doc = new Document();
-    Field idField = new Field("id", "", Field.Store.YES, Field.Index.UN_TOKENIZED);
+    Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
     doc.add(idField);
 
     for(int pass=0;pass<3;pass++) {
@@ -2287,7 +2287,7 @@
     public void run() {
 
       final Document doc = new Document();
-      doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
 
       int idUpto = 0;
       int fullCount = 0;
@@ -2414,7 +2414,7 @@
     dir.setMaxSizeInBytes(dir.getRecomputedActualSizeInBytes());
     writer.setMaxBufferedDocs(2);
     final Document doc = new Document();
-    doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
     try {
       writer.addDocument(doc);
       fail("did not hit disk full");
@@ -2516,7 +2516,7 @@
     IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
     writer.setMaxBufferedDocs(2);
     final Document doc = new Document();
-    doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
 
     for(int i=0;i<6;i++)
       writer.addDocument(doc);
@@ -2721,7 +2721,7 @@
     for(int i=0;i<10000;i++)
       b.append(" a");
     b.append(" x");
-    doc.add(new Field("field", b.toString(), Field.Store.NO, Field.Index.TOKENIZED));
+    doc.add(new Field("field", b.toString(), Field.Store.NO, Field.Index.ANALYZED));
     writer.addDocument(doc);
     writer.close();
 
@@ -2876,7 +2876,7 @@
       document = new Document();
       document.add(storedField);
       Field termVectorField = new Field("termVector", "termVector",
-                                        Field.Store.NO, Field.Index.UN_TOKENIZED,
+                                        Field.Store.NO, Field.Index.NOT_ANALYZED,
                                         Field.TermVector.WITH_POSITIONS_OFFSETS);
 
       document.add(termVectorField);
@@ -2930,7 +2930,7 @@
       document = new Document();
       document.add(storedField);
       Field termVectorField = new Field("termVector", "termVector",
-                                        Field.Store.NO, Field.Index.UN_TOKENIZED,
+                                        Field.Store.NO, Field.Index.NOT_ANALYZED,
                                         Field.TermVector.WITH_POSITIONS_OFFSETS);
       document.add(termVectorField);
       writer.addDocument(document);
@@ -2964,7 +2964,7 @@
                                   Field.Index.NO);
     document.add(storedField);
     Field termVectorField = new Field("termVector", "termVector",
-                                      Field.Store.NO, Field.Index.UN_TOKENIZED,
+                                      Field.Store.NO, Field.Index.NOT_ANALYZED,
                                       Field.TermVector.WITH_POSITIONS_OFFSETS);
     document.add(termVectorField);
     for(int i=0;i<10;i++)
@@ -3004,7 +3004,7 @@
     for(int i=0;i<10000;i++)
       b.append(" a");
     b.append(" x");
-    doc.add(new Field("field", b.toString(), Field.Store.NO, Field.Index.TOKENIZED));
+    doc.add(new Field("field", b.toString(), Field.Store.NO, Field.Index.ANALYZED));
     writer.addDocument(doc);
     writer.close();
 
@@ -3032,7 +3032,7 @@
                                   Field.Index.NO);
     document.add(storedField);
     Field termVectorField = new Field("termVector", "termVector",
-                                      Field.Store.NO, Field.Index.UN_TOKENIZED,
+                                      Field.Store.NO, Field.Index.NOT_ANALYZED,
                                       Field.TermVector.WITH_POSITIONS_OFFSETS);
     document.add(termVectorField);
     for(int i=0;i<10;i++)
@@ -3079,7 +3079,7 @@
                                   Field.Index.NO);
     document.add(storedField);
     Field termVectorField = new Field("termVector", "termVector",
-                                      Field.Store.NO, Field.Index.UN_TOKENIZED,
+                                      Field.Store.NO, Field.Index.NOT_ANALYZED,
                                       Field.TermVector.WITH_POSITIONS_OFFSETS);
     document.add(termVectorField);
     for(int i=0;i<98;i++)
@@ -3126,7 +3126,7 @@
                                   Field.Index.NO);
     document.add(storedField);
     Field termVectorField = new Field("termVector", "termVector",
-                                      Field.Store.NO, Field.Index.UN_TOKENIZED,
+                                      Field.Store.NO, Field.Index.NOT_ANALYZED,
                                       Field.TermVector.WITH_POSITIONS_OFFSETS);
     document.add(termVectorField);
     for(int i=0;i<98;i++)
@@ -3160,7 +3160,7 @@
     MockRAMDirectory dir = new MockRAMDirectory();
     IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer());
     Document doc = new Document();
-    doc.add(new Field("", "a b c", Field.Store.NO, Field.Index.TOKENIZED));
+    doc.add(new Field("", "a b c", Field.Store.NO, Field.Index.ANALYZED));
     writer.addDocument(doc);
     writer.close();
   }
@@ -3186,7 +3186,7 @@
     MockIndexWriter w = new MockIndexWriter(dir, false, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
     Document doc = new Document();
     doc.add(new Field("field", "a field", Field.Store.YES,
-                      Field.Index.TOKENIZED));
+                      Field.Index.ANALYZED));
     w.addDocument(doc);
     w.doFail = true;
     try {
@@ -3207,7 +3207,7 @@
     w.setMaxBufferedDocs(2);
     Document doc = new Document();
     doc.add(new Field("field", "a field", Field.Store.YES,
-                      Field.Index.TOKENIZED));
+                      Field.Index.ANALYZED));
     w.addDocument(doc);
 
     Analyzer analyzer = new Analyzer() {
@@ -3218,7 +3218,7 @@
 
     Document crashDoc = new Document();
     crashDoc.add(new Field("crash", "do it on token 4", Field.Store.YES,
-                           Field.Index.TOKENIZED));
+                           Field.Index.ANALYZED));
     try {
       w.addDocument(crashDoc, analyzer);
       fail("did not hit expected exception");
@@ -3258,7 +3258,7 @@
     w.setMergeScheduler(new ConcurrentMergeScheduler());
     Document doc = new Document();
     doc.add(new Field("field", "a field", Field.Store.YES,
-                      Field.Index.TOKENIZED));
+                      Field.Index.ANALYZED));
     for(int i=0;i<10;i++)
       try {
         w.addDocument(doc);
@@ -3291,7 +3291,7 @@
     MockIndexWriter3 w = new MockIndexWriter3(dir, false, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
     Document doc = new Document();
     doc.add(new Field("field", "a field", Field.Store.YES,
-                      Field.Index.TOKENIZED));
+                      Field.Index.ANALYZED));
     w.addDocument(doc);
     w.commit();
     assertTrue(w.wasCalled);
@@ -3343,7 +3343,7 @@
     IndexWriter w = new IndexWriter(dir, false, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
     Document doc = new Document();
     doc.add(new Field("field", "a field", Field.Store.YES,
-                      Field.Index.TOKENIZED));
+                      Field.Index.ANALYZED));
     w.addDocument(doc);
     dir.failOn(failure);
     try {
@@ -3393,7 +3393,7 @@
 
     final int count = utf8Data.length/2;
     for(int i=0;i<count;i++)
-      doc.add(new Field("f" + i, utf8Data[2*i], Field.Store.YES, Field.Index.TOKENIZED));
+      doc.add(new Field("f" + i, utf8Data[2*i], Field.Store.YES, Field.Index.ANALYZED));
     w.addDocument(doc);
     w.close();
 
@@ -4093,7 +4093,7 @@
           Document doc = new Document();
           doc.add(new Field("test1", "this is some data that will be compressed this this this", Field.Store.COMPRESS, Field.Index.NO));
           doc.add(new Field("test2", new byte[20], Field.Store.COMPRESS));
-          doc.add(new Field("field" + i, "random field", Field.Store.NO, Field.Index.TOKENIZED));
+          doc.add(new Field("field" + i, "random field", Field.Store.NO, Field.Index.ANALYZED));
           w.addDocument(doc);
         } finally {
           w.close();

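For readers skimming these hunks: the change throughout this commit is a mechanical rename of the Field.Index constants. TOKENIZED becomes ANALYZED (the field value is run through the analyzer and split into terms) and UN_TOKENIZED becomes NOT_ANALYZED (the value is indexed verbatim as a single term, which the tests above use for "id"-style fields). A minimal standalone sketch of the renamed constants, built only from classes and calls that already appear in this diff; the class name AnalyzedFieldExample is illustrative and not part of the commit:

    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.RAMDirectory;

    public class AnalyzedFieldExample {
      public static void main(String[] args) throws Exception {
        RAMDirectory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true,
                                             IndexWriter.MaxFieldLength.LIMITED);
        Document doc = new Document();
        // ANALYZED (formerly TOKENIZED): the analyzer tokenizes the value,
        // so this field is searchable term by term.
        doc.add(new Field("contents", "here are some contents", Field.Store.YES,
                          Field.Index.ANALYZED));
        // NOT_ANALYZED (formerly UN_TOKENIZED): the value is indexed as one
        // term, bypassing the analyzer; typical for identifiers.
        doc.add(new Field("id", "Q36", Field.Store.YES, Field.Index.NOT_ANALYZED));
        writer.addDocument(doc);
        writer.close();
      }
    }

The rename does not change behavior; only the constant names differ, which is why every hunk in this commit is a one-line substitution.
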
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterDelete.java?rev=694004&r1=694003&r2=694004&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterDelete.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterDelete.java Wed Sep 10 14:38:52 2008
@@ -52,14 +52,14 @@
       for (int i = 0; i < keywords.length; i++) {
         Document doc = new Document();
         doc.add(new Field("id", keywords[i], Field.Store.YES,
-                          Field.Index.UN_TOKENIZED));
+                          Field.Index.NOT_ANALYZED));
         doc.add(new Field("country", unindexed[i], Field.Store.YES,
                           Field.Index.NO));
         doc.add(new Field("contents", unstored[i], Field.Store.NO,
-                          Field.Index.TOKENIZED));
+                          Field.Index.ANALYZED));
         doc
           .add(new Field("city", text[i], Field.Store.YES,
-                         Field.Index.TOKENIZED));
+                         Field.Index.ANALYZED));
         modifier.addDocument(doc);
       }
       modifier.optimize();
@@ -267,11 +267,11 @@
   private void addDoc(IndexWriter modifier, int id, int value)
       throws IOException {
     Document doc = new Document();
-    doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.TOKENIZED));
+    doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
     doc.add(new Field("id", String.valueOf(id), Field.Store.YES,
-        Field.Index.UN_TOKENIZED));
+        Field.Index.NOT_ANALYZED));
     doc.add(new Field("value", String.valueOf(value), Field.Store.NO,
-        Field.Index.UN_TOKENIZED));
+        Field.Index.NOT_ANALYZED));
     modifier.addDocument(doc);
   }
 
@@ -311,9 +311,9 @@
       for (int i = 0; i < 157; i++) {
         Document d = new Document();
         d.add(new Field("id", Integer.toString(i), Field.Store.YES,
-                        Field.Index.UN_TOKENIZED));
+                        Field.Index.NOT_ANALYZED));
         d.add(new Field("content", "aaa " + i, Field.Store.NO,
-                        Field.Index.TOKENIZED));
+                        Field.Index.ANALYZED));
         writer.addDocument(d);
       }
       writer.close();
@@ -383,9 +383,9 @@
                 if (updates) {
                   Document d = new Document();
                   d.add(new Field("id", Integer.toString(i), Field.Store.YES,
-                                  Field.Index.UN_TOKENIZED));
+                                  Field.Index.NOT_ANALYZED));
                   d.add(new Field("content", "bbb " + i, Field.Store.NO,
-                                  Field.Index.TOKENIZED));
+                                  Field.Index.ANALYZED));
                   modifier.updateDocument(new Term("id", Integer.toString(docId)), d);
                 } else { // deletes
                   modifier.deleteDocuments(new Term("id", Integer.toString(docId)));
@@ -546,13 +546,13 @@
       for (int i = 0; i < keywords.length; i++) {
         Document doc = new Document();
         doc.add(new Field("id", keywords[i], Field.Store.YES,
-                          Field.Index.UN_TOKENIZED));
+                          Field.Index.NOT_ANALYZED));
         doc.add(new Field("country", unindexed[i], Field.Store.YES,
                           Field.Index.NO));
         doc.add(new Field("contents", unstored[i], Field.Store.NO,
-                          Field.Index.TOKENIZED));
+                          Field.Index.ANALYZED));
         doc.add(new Field("city", text[i], Field.Store.YES,
-                          Field.Index.TOKENIZED));
+                          Field.Index.ANALYZED));
         modifier.addDocument(doc);
       }
       // flush (and commit if ac)
@@ -654,13 +654,13 @@
       for (int i = 0; i < keywords.length; i++) {
         Document doc = new Document();
         doc.add(new Field("id", keywords[i], Field.Store.YES,
-                          Field.Index.UN_TOKENIZED));
+                          Field.Index.NOT_ANALYZED));
         doc.add(new Field("country", unindexed[i], Field.Store.YES,
                           Field.Index.NO));
         doc.add(new Field("contents", unstored[i], Field.Store.NO,
-                          Field.Index.TOKENIZED));
+                          Field.Index.ANALYZED));
         doc.add(new Field("city", text[i], Field.Store.YES,
-                          Field.Index.TOKENIZED));
+                          Field.Index.ANALYZED));
         try {
           modifier.addDocument(doc);
         } catch (IOException io) {

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java?rev=694004&r1=694003&r2=694004&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java Wed Sep 10 14:38:52 2008
@@ -49,17 +49,17 @@
 
       final Document doc = new Document();
 
-      doc.add(new Field("content1", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.TOKENIZED));
-      doc.add(new Field("content6", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-      doc.add(new Field("content2", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.UN_TOKENIZED));
+      doc.add(new Field("content1", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.ANALYZED));
+      doc.add(new Field("content6", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      doc.add(new Field("content2", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.NOT_ANALYZED));
       doc.add(new Field("content3", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.NO));
 
-      doc.add(new Field("content4", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.TOKENIZED));
-      doc.add(new Field("content5", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.UN_TOKENIZED));
+      doc.add(new Field("content4", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.ANALYZED));
+      doc.add(new Field("content5", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.NOT_ANALYZED));
 
-      doc.add(new Field("content7", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.UN_TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      doc.add(new Field("content7", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
 
-      final Field idField = new Field("id", "", Field.Store.YES, Field.Index.UN_TOKENIZED);
+      final Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
       doc.add(idField);
 
       final long stopTime = System.currentTimeMillis() + 3000;

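Several hunks in this file and in TestIndexWriter also pass a fifth argument, Field.TermVector.WITH_POSITIONS_OFFSETS. For orientation, a brief sketch of that five-argument constructor, assuming the same API the diff exercises (TermVectorExample is an illustrative name, not part of the commit): a term vector stores the field's terms per document, here with their positions and character offsets, so they can be inspected later without re-analyzing the text.

    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.RAMDirectory;

    public class TermVectorExample {
      public static void main(String[] args) throws Exception {
        IndexWriter writer = new IndexWriter(new RAMDirectory(),
            new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
        Document doc = new Document();
        // The term-vector argument is orthogonal to the ANALYZED /
        // NOT_ANALYZED rename; both forms accept it, as the hunks above show.
        doc.add(new Field("field", "aaa bbb ccc ddd", Field.Store.YES,
                          Field.Index.ANALYZED,
                          Field.TermVector.WITH_POSITIONS_OFFSETS));
        writer.addDocument(doc);
        writer.close();
      }
    }
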
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java?rev=694004&r1=694003&r2=694004&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java Wed Sep 10 14:38:52 2008
@@ -199,7 +199,7 @@
 
   private void addDoc(IndexWriter writer) throws IOException {
     Document doc = new Document();
-    doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.TOKENIZED));
+    doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
     writer.addDocument(doc);
   }
 

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterMerging.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterMerging.java?rev=694004&r1=694003&r2=694004&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterMerging.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterMerging.java Wed Sep 10 14:38:52 2008
@@ -99,7 +99,7 @@
     for (int i = start; i < (start + numDocs); i++)
     {
       Document temp = new Document();
-      temp.add(new Field("count", (""+i), Field.Store.YES, Field.Index.UN_TOKENIZED));
+      temp.add(new Field("count", (""+i), Field.Store.YES, Field.Index.NOT_ANALYZED));
 
       writer.addDocument(temp);
     }

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestLazyBug.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestLazyBug.java?rev=694004&r1=694003&r2=694004&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestLazyBug.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestLazyBug.java Wed Sep 10 14:38:52 2008
@@ -78,7 +78,7 @@
                             data[f % data.length] 
                             + '#' + data[r.nextInt(data.length)], 
                             Field.Store.YES, 
-                            Field.Index.TOKENIZED));
+                            Field.Index.ANALYZED));
         }
         writer.addDocument(doc);
       }

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestLazyProxSkipping.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestLazyProxSkipping.java?rev=694004&r1=694003&r2=694004&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestLazyProxSkipping.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestLazyProxSkipping.java Wed Sep 10 14:38:52 2008
@@ -64,7 +64,7 @@
                 content = this.term3 + " " + this.term2;
             }
 
-            doc.add(new Field(this.field, content, Field.Store.YES, Field.Index.TOKENIZED));
+            doc.add(new Field(this.field, content, Field.Store.YES, Field.Index.ANALYZED));
             writer.addDocument(doc);
         }
         
@@ -111,7 +111,7 @@
         IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
         for (int i = 0; i < 10; i++) {
             Document doc = new Document();
-            doc.add(new Field(this.field, "a b", Field.Store.YES, Field.Index.TOKENIZED));
+            doc.add(new Field(this.field, "a b", Field.Store.YES, Field.Index.ANALYZED));
             writer.addDocument(doc);
         }
         

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java?rev=694004&r1=694003&r2=694004&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java Wed Sep 10 14:38:52 2008
@@ -50,7 +50,7 @@
     Term term = new Term("test", "a");
     for (int i = 0; i < 5000; i++) {
       Document d1 = new Document();
-      d1.add(new Field(term.field(), term.text(), Store.NO, Index.TOKENIZED));
+      d1.add(new Field(term.field(), term.text(), Store.NO, Index.ANALYZED));
       writer.addDocument(d1);
     }
     writer.flush();

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestMultiSegmentReader.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestMultiSegmentReader.java?rev=694004&r1=694003&r2=694004&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestMultiSegmentReader.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestMultiSegmentReader.java Wed Sep 10 14:38:52 2008
@@ -152,7 +152,7 @@
   private void addDoc(RAMDirectory ramDir1, String s, boolean create) throws IOException {
     IndexWriter iw = new IndexWriter(ramDir1, new StandardAnalyzer(), create, IndexWriter.MaxFieldLength.LIMITED);
     Document doc = new Document();
-    doc.add(new Field("body", s, Field.Store.YES, Field.Index.TOKENIZED));
+    doc.add(new Field("body", s, Field.Store.YES, Field.Index.ANALYZED));
     iw.addDocument(doc);
     iw.close();
   }

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestNorms.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestNorms.java?rev=694004&r1=694003&r2=694004&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestNorms.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestNorms.java Wed Sep 10 14:38:52 2008
@@ -210,7 +210,7 @@
     Document d = new Document();
     float boost = nextNorm();
     for (int i = 0; i < 10; i++) {
-      Field f = new Field("f"+i,"v"+i,Store.NO,Index.UN_TOKENIZED);
+      Field f = new Field("f"+i,"v"+i,Store.NO,Index.NOT_ANALYZED);
       f.setBoost(boost);
       d.add(f);
     }

Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestOmitTf.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestOmitTf.java?rev=694004&r1=694003&r2=694004&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestOmitTf.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestOmitTf.java Wed Sep 10 14:38:52 2008
@@ -60,11 +60,11 @@
     Document d = new Document();
         
     // this field will have Tf
-    Field f1 = new Field("f1", "This field has term freqs", Field.Store.NO, Field.Index.TOKENIZED);
+    Field f1 = new Field("f1", "This field has term freqs", Field.Store.NO, Field.Index.ANALYZED);
     d.add(f1);
        
     // this field will NOT have Tf
-    Field f2 = new Field("f2", "This field has NO Tf in all docs", Field.Store.NO, Field.Index.TOKENIZED);
+    Field f2 = new Field("f2", "This field has NO Tf in all docs", Field.Store.NO, Field.Index.ANALYZED);
     f2.setOmitTf(true);
     d.add(f2);
         
@@ -109,11 +109,11 @@
     Document d = new Document();
         
     // this field will have Tf
-    Field f1 = new Field("f1", "This field has term freqs", Field.Store.NO, Field.Index.TOKENIZED);
+    Field f1 = new Field("f1", "This field has term freqs", Field.Store.NO, Field.Index.ANALYZED);
     d.add(f1);
        
     // this field will NOT have Tf
-    Field f2 = new Field("f2", "This field has NO Tf in all docs", Field.Store.NO, Field.Index.TOKENIZED);
+    Field f2 = new Field("f2", "This field has NO Tf in all docs", Field.Store.NO, Field.Index.ANALYZED);
     f2.setOmitTf(true);
     d.add(f2);
 
@@ -163,11 +163,11 @@
     Document d = new Document();
         
     // this field will have Tf
-    Field f1 = new Field("f1", "This field has term freqs", Field.Store.NO, Field.Index.TOKENIZED);
+    Field f1 = new Field("f1", "This field has term freqs", Field.Store.NO, Field.Index.ANALYZED);
     d.add(f1);
        
     // this field will NOT have Tf
-    Field f2 = new Field("f2", "This field has NO Tf in all docs", Field.Store.NO, Field.Index.TOKENIZED);
+    Field f2 = new Field("f2", "This field has NO Tf in all docs", Field.Store.NO, Field.Index.ANALYZED);
     d.add(f2);
 
     for(int i=0;i<5;i++)
@@ -212,7 +212,7 @@
     writer.setUseCompoundFile(false);
     Document d = new Document();
         
-    Field f1 = new Field("f1", "This field has term freqs", Field.Store.NO, Field.Index.TOKENIZED);
+    Field f1 = new Field("f1", "This field has term freqs", Field.Store.NO, Field.Index.ANALYZED);
     f1.setOmitTf(true);
     d.add(f1);
 
@@ -249,11 +249,11 @@
       Document d = new Document();
       sb.append(term).append(" ");
       String content  = sb.toString();
-      Field noTf = new Field("noTf", content + (i%2==0 ? "" : " notf"), Field.Store.NO, Field.Index.TOKENIZED);
+      Field noTf = new Field("noTf", content + (i%2==0 ? "" : " notf"), Field.Store.NO, Field.Index.ANALYZED);
       noTf.setOmitTf(true);
       d.add(noTf);
           
-      Field tf = new Field("tf", content + (i%2==0 ? " tf" : ""), Field.Store.NO, Field.Index.TOKENIZED);
+      Field tf = new Field("tf", content + (i%2==0 ? " tf" : ""), Field.Store.NO, Field.Index.ANALYZED);
       d.add(tf);
           
       writer.addDocument(d);

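The TestOmitTf hunks above combine the renamed ANALYZED constant with Field.setOmitTf. A short hedged sketch of that combination, using the same 2.4-era calls that appear in the diff (OmitTfExample is an illustrative name, not part of the commit):

    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.RAMDirectory;

    public class OmitTfExample {
      public static void main(String[] args) throws Exception {
        IndexWriter writer = new IndexWriter(new RAMDirectory(),
            new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
        Document d = new Document();
        // Analyzed field with term frequencies recorded (the default).
        d.add(new Field("f1", "this field has term freqs",
                        Field.Store.NO, Field.Index.ANALYZED));
        // setOmitTf(true) drops term-frequency information for this field
        // (and, in this era of the API, position information with it),
        // shrinking the index at the cost of tf-based scoring.
        Field f2 = new Field("f2", "this field has no tf in any doc",
                             Field.Store.NO, Field.Index.ANALYZED);
        f2.setOmitTf(true);
        d.add(f2);
        writer.addDocument(d);
        writer.close();
      }
    }
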
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestParallelReader.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestParallelReader.java?rev=694004&r1=694003&r2=694004&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestParallelReader.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestParallelReader.java Wed Sep 10 14:38:52 2008
@@ -107,7 +107,7 @@
     Directory dir2 = new MockRAMDirectory();
     IndexWriter w2 = new IndexWriter(dir2, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
     Document d3 = new Document();
-    d3.add(new Field("f3", "v1", Field.Store.YES, Field.Index.TOKENIZED));
+    d3.add(new Field("f3", "v1", Field.Store.YES, Field.Index.ANALYZED));
     w2.addDocument(d3);
     w2.close();
     
@@ -152,13 +152,13 @@
     // add another document to ensure that the indexes are not optimized
     IndexWriter modifier = new IndexWriter(dir1, new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
     Document d = new Document();
-    d.add(new Field("f1", "v1", Field.Store.YES, Field.Index.TOKENIZED));
+    d.add(new Field("f1", "v1", Field.Store.YES, Field.Index.ANALYZED));
     modifier.addDocument(d);
     modifier.close();
     
     modifier = new IndexWriter(dir2, new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
     d = new Document();
-    d.add(new Field("f2", "v2", Field.Store.YES, Field.Index.TOKENIZED));
+    d.add(new Field("f2", "v2", Field.Store.YES, Field.Index.ANALYZED));
     modifier.addDocument(d);
     modifier.close();
 
@@ -215,16 +215,16 @@
     Directory dir = new MockRAMDirectory();
     IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
     Document d1 = new Document();
-    d1.add(new Field("f1", "v1", Field.Store.YES, Field.Index.TOKENIZED));
-    d1.add(new Field("f2", "v1", Field.Store.YES, Field.Index.TOKENIZED));
-    d1.add(new Field("f3", "v1", Field.Store.YES, Field.Index.TOKENIZED));
-    d1.add(new Field("f4", "v1", Field.Store.YES, Field.Index.TOKENIZED));
+    d1.add(new Field("f1", "v1", Field.Store.YES, Field.Index.ANALYZED));
+    d1.add(new Field("f2", "v1", Field.Store.YES, Field.Index.ANALYZED));
+    d1.add(new Field("f3", "v1", Field.Store.YES, Field.Index.ANALYZED));
+    d1.add(new Field("f4", "v1", Field.Store.YES, Field.Index.ANALYZED));
     w.addDocument(d1);
     Document d2 = new Document();
-    d2.add(new Field("f1", "v2", Field.Store.YES, Field.Index.TOKENIZED));
-    d2.add(new Field("f2", "v2", Field.Store.YES, Field.Index.TOKENIZED));
-    d2.add(new Field("f3", "v2", Field.Store.YES, Field.Index.TOKENIZED));
-    d2.add(new Field("f4", "v2", Field.Store.YES, Field.Index.TOKENIZED));
+    d2.add(new Field("f1", "v2", Field.Store.YES, Field.Index.ANALYZED));
+    d2.add(new Field("f2", "v2", Field.Store.YES, Field.Index.ANALYZED));
+    d2.add(new Field("f3", "v2", Field.Store.YES, Field.Index.ANALYZED));
+    d2.add(new Field("f4", "v2", Field.Store.YES, Field.Index.ANALYZED));
     w.addDocument(d2);
     w.close();
 
@@ -245,12 +245,12 @@
     Directory dir1 = new MockRAMDirectory();
     IndexWriter w1 = new IndexWriter(dir1, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
     Document d1 = new Document();
-    d1.add(new Field("f1", "v1", Field.Store.YES, Field.Index.TOKENIZED));
-    d1.add(new Field("f2", "v1", Field.Store.YES, Field.Index.TOKENIZED));
+    d1.add(new Field("f1", "v1", Field.Store.YES, Field.Index.ANALYZED));
+    d1.add(new Field("f2", "v1", Field.Store.YES, Field.Index.ANALYZED));
     w1.addDocument(d1);
     Document d2 = new Document();
-    d2.add(new Field("f1", "v2", Field.Store.YES, Field.Index.TOKENIZED));
-    d2.add(new Field("f2", "v2", Field.Store.YES, Field.Index.TOKENIZED));
+    d2.add(new Field("f1", "v2", Field.Store.YES, Field.Index.ANALYZED));
+    d2.add(new Field("f2", "v2", Field.Store.YES, Field.Index.ANALYZED));
     w1.addDocument(d2);
     w1.close();
     return dir1;
@@ -260,12 +260,12 @@
     Directory dir2 = new RAMDirectory();
     IndexWriter w2 = new IndexWriter(dir2, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
     Document d3 = new Document();
-    d3.add(new Field("f3", "v1", Field.Store.YES, Field.Index.TOKENIZED));
-    d3.add(new Field("f4", "v1", Field.Store.YES, Field.Index.TOKENIZED));
+    d3.add(new Field("f3", "v1", Field.Store.YES, Field.Index.ANALYZED));
+    d3.add(new Field("f4", "v1", Field.Store.YES, Field.Index.ANALYZED));
     w2.addDocument(d3);
     Document d4 = new Document();
-    d4.add(new Field("f3", "v2", Field.Store.YES, Field.Index.TOKENIZED));
-    d4.add(new Field("f4", "v2", Field.Store.YES, Field.Index.TOKENIZED));
+    d4.add(new Field("f3", "v2", Field.Store.YES, Field.Index.ANALYZED));
+    d4.add(new Field("f4", "v2", Field.Store.YES, Field.Index.ANALYZED));
     w2.addDocument(d4);
     w2.close();
     return dir2;