Posted to commits@lucene.apache.org by mi...@apache.org on 2011/08/27 15:27:12 UTC

svn commit: r1162347 [13/15] - in /lucene/dev/trunk: lucene/ lucene/contrib/ lucene/contrib/demo/src/java/org/apache/lucene/demo/ lucene/contrib/demo/src/java/org/apache/lucene/demo/xmlparser/ lucene/contrib/highlighter/src/java/org/apache/lucene/searc...

Modified: lucene/dev/trunk/modules/facet/src/java/org/apache/lucene/facet/taxonomy/lucene/LuceneTaxonomyWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/facet/src/java/org/apache/lucene/facet/taxonomy/lucene/LuceneTaxonomyWriter.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/modules/facet/src/java/org/apache/lucene/facet/taxonomy/lucene/LuceneTaxonomyWriter.java (original)
+++ lucene/dev/trunk/modules/facet/src/java/org/apache/lucene/facet/taxonomy/lucene/LuceneTaxonomyWriter.java Sat Aug 27 13:27:01 2011
@@ -17,11 +17,11 @@ import org.apache.lucene.analysis.tokena
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
@@ -179,10 +179,10 @@ public class LuceneTaxonomyWriter implem
     openLuceneIndex(directory, openMode);
     reader = null;
 
-    parentStreamField = new Field(Consts.FIELD_PAYLOADS, parentStream);
-    parentStreamField.setOmitNorms(true);
-    fullPathField = new Field(Consts.FULL, "", Store.YES, Index.NOT_ANALYZED_NO_NORMS);
-    fullPathField.setIndexOptions(IndexOptions.DOCS_ONLY);
+    FieldType ft = new FieldType(TextField.TYPE_UNSTORED);
+    ft.setOmitNorms(true);
+    parentStreamField = new Field(Consts.FIELD_PAYLOADS, ft, parentStream);
+    fullPathField = new Field(Consts.FULL, StringField.TYPE_STORED, "");
 
     this.nextID = indexWriter.maxDoc();
 

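The hunk above captures the pattern repeated throughout this commit: per-field settings (Field.Store, Field.Index, setOmitNorms, setIndexOptions) move onto a FieldType, and the Field is constructed from that type. A minimal sketch of the new construction, using only the trunk API visible in this diff; the class and field names are placeholders, not the facet module's real constants:

    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.FieldType;
    import org.apache.lucene.document.StringField;
    import org.apache.lucene.document.TextField;

    class TaxonomyFieldSketch {
      static void addFields(Document doc, TokenStream parentStream, String fullPath) {
        // Tokenized, unstored, norms disabled: replaces Field(name, stream) + setOmitNorms(true).
        FieldType payloadType = new FieldType(TextField.TYPE_UNSTORED);
        payloadType.setOmitNorms(true);
        doc.add(new Field("payloads", payloadType, parentStream));

        // Stored keyword: replaces Store.YES + Index.NOT_ANALYZED_NO_NORMS in this hunk.
        doc.add(new Field("path", StringField.TYPE_STORED, fullPath));
      }
    }
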
Modified: lucene/dev/trunk/modules/facet/src/test/org/apache/lucene/facet/FacetTestBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/facet/src/test/org/apache/lucene/facet/FacetTestBase.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/modules/facet/src/test/org/apache/lucene/facet/FacetTestBase.java (original)
+++ lucene/dev/trunk/modules/facet/src/test/org/apache/lucene/facet/FacetTestBase.java Sat Aug 27 13:27:01 2011
@@ -15,9 +15,7 @@ import org.apache.lucene.analysis.MockAn
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.IndexReader;
@@ -247,7 +245,7 @@ public abstract class FacetTestBase exte
     CategoryDocumentBuilder builder = new CategoryDocumentBuilder(tw, iParams);
     builder.setCategoryPaths(categories);
     builder.build(d);
-    d.add(new Field("content", content, Store.YES, Index.ANALYZED, TermVector.NO));
+    d.add(new Field("content", TextField.TYPE_STORED, content));
     iw.addDocument(d);
   }
   

Modified: lucene/dev/trunk/modules/facet/src/test/org/apache/lucene/facet/FacetTestUtils.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/facet/src/test/org/apache/lucene/facet/FacetTestUtils.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/modules/facet/src/test/org/apache/lucene/facet/FacetTestUtils.java (original)
+++ lucene/dev/trunk/modules/facet/src/test/org/apache/lucene/facet/FacetTestUtils.java Sat Aug 27 13:27:01 2011
@@ -7,9 +7,7 @@ import java.util.Collection;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
@@ -130,8 +128,7 @@ public class FacetTestUtils {
     cps.add(cp);
     Document d = new Document();
     new CategoryDocumentBuilder(tw, iParams).setCategoryPaths(cps).build(d);
-    d.add(new Field("content", "alpha", Store.YES, Index.ANALYZED,
-        TermVector.NO));
+    d.add(new Field("content", TextField.TYPE_STORED, "alpha"));
     iw.addDocument(d);
   }
 

Modified: lucene/dev/trunk/modules/facet/src/test/org/apache/lucene/facet/search/CategoryListIteratorTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/facet/src/test/org/apache/lucene/facet/search/CategoryListIteratorTest.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/modules/facet/src/test/org/apache/lucene/facet/search/CategoryListIteratorTest.java (original)
+++ lucene/dev/trunk/modules/facet/src/test/org/apache/lucene/facet/search/CategoryListIteratorTest.java Sat Aug 27 13:27:01 2011
@@ -12,7 +12,7 @@ import org.apache.lucene.analysis.TokenS
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Payload;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -21,8 +21,6 @@ import org.apache.lucene.store.Directory
 import org.junit.Test;
 
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.facet.search.CategoryListIterator;
-import org.apache.lucene.facet.search.PayloadIntDecodingIterator;
 import org.apache.lucene.util.UnsafeByteArrayOutputStream;
 import org.apache.lucene.util.encoding.DGapIntEncoder;
 import org.apache.lucene.util.encoding.IntEncoder;
@@ -104,7 +102,7 @@ public class CategoryListIteratorTest ex
     for (int i = 0; i < data.length; i++) {
       dts.setIdx(i);
       Document doc = new Document();
-      doc.add(new Field("f", dts));
+      doc.add(new TextField("f", dts));
       writer.addDocument(doc);
     }
     IndexReader reader = writer.getReader();
@@ -155,10 +153,10 @@ public class CategoryListIteratorTest ex
       dts.setIdx(i);
       Document doc = new Document();
       if (i==0 || i == 2) {
-        doc.add(new Field("f", dts)); // only docs 0 & 2 have payloads!
+        doc.add(new TextField("f", dts)); // only docs 0 & 2 have payloads!
       }
       dts2.setIdx(i);
-      doc.add(new Field("f", dts2));
+      doc.add(new TextField("f", dts2));
       writer.addDocument(doc);
       writer.commit();
     }
@@ -168,7 +166,7 @@ public class CategoryListIteratorTest ex
     for (int i = 0; i < 10; ++i) {
       Document d = new Document();
       dts.setIdx(2);
-      d.add(new Field("f", dts2));
+      d.add(new TextField("f", dts2));
       writer.addDocument(d);
       if (i %10 == 0) {
         writer.commit();

Modified: lucene/dev/trunk/modules/facet/src/test/org/apache/lucene/facet/search/DrillDownTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/facet/src/test/org/apache/lucene/facet/search/DrillDownTest.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/modules/facet/src/test/org/apache/lucene/facet/search/DrillDownTest.java (original)
+++ lucene/dev/trunk/modules/facet/src/test/org/apache/lucene/facet/search/DrillDownTest.java Sat Aug 27 13:27:01 2011
@@ -6,12 +6,9 @@ import java.util.ArrayList;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.IndexSearcher;
@@ -28,7 +25,6 @@ import org.apache.lucene.util.LuceneTest
 import org.apache.lucene.facet.index.CategoryDocumentBuilder;
 import org.apache.lucene.facet.index.params.CategoryListParams;
 import org.apache.lucene.facet.index.params.PerDimensionIndexingParams;
-import org.apache.lucene.facet.search.DrillDown;
 import org.apache.lucene.facet.search.params.FacetSearchParams;
 import org.apache.lucene.facet.taxonomy.CategoryPath;
 import org.apache.lucene.facet.taxonomy.TaxonomyWriter;
@@ -84,10 +80,10 @@ public class DrillDownTest extends Lucen
       ArrayList<CategoryPath> paths = new ArrayList<CategoryPath>();
       Document doc = new Document();
       if (i % 2 == 0) { // 50
-        doc.add(new Field("content", "foo", Store.NO, Index.ANALYZED));
+        doc.add(new TextField("content", "foo"));
       }
       if (i % 3 == 0) { // 33
-        doc.add(new Field("content", "bar", Store.NO, Index.ANALYZED));
+        doc.add(new TextField("content", "bar"));
       }
       if (i % 4 == 0) { // 25
         paths.add(new CategoryPath("a"));

Modified: lucene/dev/trunk/modules/facet/src/test/org/apache/lucene/facet/search/TestTopKInEachNodeResultHandler.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/facet/src/test/org/apache/lucene/facet/search/TestTopKInEachNodeResultHandler.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/modules/facet/src/test/org/apache/lucene/facet/search/TestTopKInEachNodeResultHandler.java (original)
+++ lucene/dev/trunk/modules/facet/src/test/org/apache/lucene/facet/search/TestTopKInEachNodeResultHandler.java Sat Aug 27 13:27:01 2011
@@ -7,9 +7,7 @@ import java.util.List;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
@@ -24,11 +22,6 @@ import org.junit.Test;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.facet.index.CategoryDocumentBuilder;
 import org.apache.lucene.facet.index.params.DefaultFacetIndexingParams;
-import org.apache.lucene.facet.search.FacetsAccumulator;
-import org.apache.lucene.facet.search.FloatArrayAllocator;
-import org.apache.lucene.facet.search.IntArrayAllocator;
-import org.apache.lucene.facet.search.ScoredDocIdCollector;
-import org.apache.lucene.facet.search.StandardFacetsAccumulator;
 import org.apache.lucene.facet.search.params.CountFacetRequest;
 import org.apache.lucene.facet.search.params.FacetSearchParams;
 import org.apache.lucene.facet.search.params.FacetRequest.ResultMode;
@@ -335,7 +328,7 @@ public class TestTopKInEachNodeResultHan
     cps.add(cp);
     Document d = new Document();
     new CategoryDocumentBuilder(tw, iParams).setCategoryPaths(cps).build(d);
-    d.add(new Field("content", "alpha", Store.YES, Index.ANALYZED, TermVector.NO));
+    d.add(new Field("content", TextField.TYPE_STORED, "alpha"));
     iw.addDocument(d);
   }
 

Modified: lucene/dev/trunk/modules/facet/src/test/org/apache/lucene/facet/util/TestScoredDocIDsUtils.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/facet/src/test/org/apache/lucene/facet/util/TestScoredDocIDsUtils.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/modules/facet/src/test/org/apache/lucene/facet/util/TestScoredDocIDsUtils.java (original)
+++ lucene/dev/trunk/modules/facet/src/test/org/apache/lucene/facet/util/TestScoredDocIDsUtils.java Sat Aug 27 13:27:01 2011
@@ -7,8 +7,8 @@ import org.apache.lucene.analysis.MockAn
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -134,7 +134,7 @@ public class TestScoredDocIDsUtils exten
         int docNum = it.getDocID();
         assertNull(
             "Deleted docs must not appear in the allDocsScoredDocIds set: " + docNum, 
-            reader.document(docNum).getFieldable("del"));
+            reader.document(docNum).getField("del"));
       }
 
       assertEquals("Wrong number of (live) documents", allDocs.size(), numIteratedDocs);
@@ -166,7 +166,7 @@ public class TestScoredDocIDsUtils exten
             live != null && !live.get(docNum));
         assertNull(
             "Complement-Set must not contain docs from the original set (doc="+ docNum+")",
-            reader.document(docNum).getFieldable("del"));
+            reader.document(docNum).getField("del"));
         assertFalse(
             "Complement-Set must not contain docs from the original set (doc="+docNum+")",
             resultSet.fastGet(docNum));
@@ -189,8 +189,8 @@ public class TestScoredDocIDsUtils exten
     protected final static String delTxt = "delete";
     protected final static String alphaTxt = "alpha";
     
-    private final static Field deletionMark = new Field(field, delTxt, Store.NO, Index.NOT_ANALYZED_NO_NORMS);
-    private final static Field alphaContent = new Field(field, alphaTxt, Store.NO, Index.NOT_ANALYZED_NO_NORMS);
+    private final static Field deletionMark = new StringField(field, delTxt);
+    private final static Field alphaContent = new StringField(field, alphaTxt);
     
     protected final int numDocs;
     
@@ -208,7 +208,9 @@ public class TestScoredDocIDsUtils exten
         doc.add(deletionMark);
         // Add a special field for docs that are marked for deletion. Later we
         // assert that those docs are not returned by all-scored-doc-IDs.
-        doc.add(new Field("del", Integer.toString(docNum), Store.YES, Index.NO));
+        FieldType ft = new FieldType();
+        ft.setStored(true);
+        doc.add(new Field("del", ft, Integer.toString(docNum)));
       }
 
       if (haveAlpha(docNum)) {

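A few replacements in the hunks above recur in later files: a field that was Store.YES + Index.NO becomes a bare FieldType with only setStored(true), keyword fields that were Index.NOT_ANALYZED_NO_NORMS become StringField instances, and on the read side Document.getFieldable(...) becomes getField(...). A compact sketch of the indexing side, with hypothetical field names:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.FieldType;
    import org.apache.lucene.document.StringField;

    class StoredOnlySketch {
      static Document makeDoc(int docNum, boolean markForDeletion) {
        Document doc = new Document();
        // Indexed keyword; this commit uses StringField where Index.NOT_ANALYZED_NO_NORMS was used.
        doc.add(new StringField("body", "alpha"));
        if (markForDeletion) {
          // Stored but not indexed; the FieldType stand-in for Store.YES + Index.NO.
          FieldType storedOnly = new FieldType();
          storedOnly.setStored(true);
          doc.add(new Field("del", storedOnly, Integer.toString(docNum)));
        }
        return doc;
      }
    }
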
Modified: lucene/dev/trunk/modules/grouping/src/java/org/apache/lucene/search/grouping/package.html
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/grouping/src/java/org/apache/lucene/search/grouping/package.html?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/modules/grouping/src/java/org/apache/lucene/search/grouping/package.html (original)
+++ lucene/dev/trunk/modules/grouping/src/java/org/apache/lucene/search/grouping/package.html Sat Aug 27 13:27:01 2011
@@ -130,7 +130,7 @@ field fall into a single group.</p>
   List&lt;Document&gt; oneGroup = ...;
   
   Field groupEndField = new Field("groupEnd", "x", Field.Store.NO, Field.Index.NOT_ANALYZED);
-  groupEndField.setOmitTermFreqAndPositions(true);
+  groupEndField.setIndexOptions(IndexOptions.DOCS_ONLY);
   groupEndField.setOmitNorms(true);
   oneGroup.get(oneGroup.size()-1).add(groupEndField);
 

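The documentation hunk above keeps the deprecated Field constructor and only swaps setOmitTermFreqAndPositions(true) for setIndexOptions(IndexOptions.DOCS_ONLY). Elsewhere in this commit (TestGrouping.java below) the same block-marker field is built through a FieldType instead; a sketch of that variant, under the trunk API shown there:

    import java.util.List;

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.FieldType;
    import org.apache.lucene.document.StringField;
    import org.apache.lucene.index.FieldInfo.IndexOptions;

    class GroupEndSketch {
      // Marks the last document of a block so a filter can locate group boundaries.
      static void markLastDoc(List<Document> oneGroup) {
        FieldType ft = new FieldType(StringField.TYPE_UNSTORED);
        ft.setIndexOptions(IndexOptions.DOCS_ONLY); // docs only, no freqs or positions
        ft.setOmitNorms(true);                      // constant marker term, norms not needed
        Field groupEndField = new Field("groupEnd", ft, "x");
        oneGroup.get(oneGroup.size() - 1).add(groupEndField);
      }
    }
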
Modified: lucene/dev/trunk/modules/grouping/src/test/org/apache/lucene/search/grouping/TermAllGroupHeadsCollectorTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/grouping/src/test/org/apache/lucene/search/grouping/TermAllGroupHeadsCollectorTest.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/modules/grouping/src/test/org/apache/lucene/search/grouping/TermAllGroupHeadsCollectorTest.java (original)
+++ lucene/dev/trunk/modules/grouping/src/test/org/apache/lucene/search/grouping/TermAllGroupHeadsCollectorTest.java Sat Aug 27 13:27:01 2011
@@ -21,6 +21,8 @@ import org.apache.lucene.analysis.MockAn
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.NumericField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
@@ -47,57 +49,57 @@ public class TermAllGroupHeadsCollectorT
 
     // 0
     Document doc = new Document();
-    doc.add(new Field(groupField, "author1", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("content", "random text", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
+    doc.add(newField(groupField, "author1", TextField.TYPE_STORED));
+    doc.add(newField("content", "random text", TextField.TYPE_STORED));
+    doc.add(newField("id", "1", StringField.TYPE_STORED));
     w.addDocument(doc);
 
     // 1
     doc = new Document();
-    doc.add(new Field(groupField, "author1", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("content", "some more random text blob", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("id", "2", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
+    doc.add(newField(groupField, "author1", TextField.TYPE_STORED));
+    doc.add(newField("content", "some more random text blob", TextField.TYPE_STORED));
+    doc.add(newField("id", "2", StringField.TYPE_STORED));
     w.addDocument(doc);
 
     // 2
     doc = new Document();
-    doc.add(new Field(groupField, "author1", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("content", "some more random textual data", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("id", "3", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
+    doc.add(newField(groupField, "author1", TextField.TYPE_STORED));
+    doc.add(newField("content", "some more random textual data", TextField.TYPE_STORED));
+    doc.add(newField("id", "3", StringField.TYPE_STORED));
     w.addDocument(doc);
     w.commit(); // To ensure a second segment
 
     // 3
     doc = new Document();
-    doc.add(new Field(groupField, "author2", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("content", "some random text", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("id", "4", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
+    doc.add(newField(groupField, "author2", TextField.TYPE_STORED));
+    doc.add(newField("content", "some random text", TextField.TYPE_STORED));
+    doc.add(newField("id", "4", StringField.TYPE_STORED));
     w.addDocument(doc);
 
     // 4
     doc = new Document();
-    doc.add(new Field(groupField, "author3", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("content", "some more random text", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("id", "5", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
+    doc.add(newField(groupField, "author3", TextField.TYPE_STORED));
+    doc.add(newField("content", "some more random text", TextField.TYPE_STORED));
+    doc.add(newField("id", "5", StringField.TYPE_STORED));
     w.addDocument(doc);
 
     // 5
     doc = new Document();
-    doc.add(new Field(groupField, "author3", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("content", "random blob", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("id", "6", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
+    doc.add(newField(groupField, "author3", TextField.TYPE_STORED));
+    doc.add(newField("content", "random blob", TextField.TYPE_STORED));
+    doc.add(newField("id", "6", StringField.TYPE_STORED));
     w.addDocument(doc);
 
     // 6 -- no author field
     doc = new Document();
-    doc.add(new Field("content", "random word stuck in alot of other text", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("id", "6", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
+    doc.add(newField("content", "random word stuck in alot of other text", TextField.TYPE_STORED));
+    doc.add(newField("id", "6", StringField.TYPE_STORED));
     w.addDocument(doc);
 
     // 7 -- no author field
     doc = new Document();
-    doc.add(new Field("content", "random word stuck in alot of other text", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("id", "7", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
+    doc.add(newField("content", "random word stuck in alot of other text", TextField.TYPE_STORED));
+    doc.add(newField("id", "7", StringField.TYPE_STORED));
     w.addDocument(doc);
 
     IndexSearcher indexSearcher = new IndexSearcher(w.getReader());
@@ -182,18 +184,18 @@ public class TermAllGroupHeadsCollectorT
 
       Document doc = new Document();
       Document docNoGroup = new Document();
-      Field group = newField("group", "", Field.Index.NOT_ANALYZED);
+      Field group = newField("group", "", StringField.TYPE_UNSTORED);
       doc.add(group);
-      Field sort1 = newField("sort1", "", Field.Index.NOT_ANALYZED);
+      Field sort1 = newField("sort1", "", StringField.TYPE_UNSTORED);
       doc.add(sort1);
       docNoGroup.add(sort1);
-      Field sort2 = newField("sort2", "", Field.Index.NOT_ANALYZED);
+      Field sort2 = newField("sort2", "", StringField.TYPE_UNSTORED);
       doc.add(sort2);
       docNoGroup.add(sort2);
-      Field sort3 = newField("sort3", "", Field.Index.NOT_ANALYZED);
+      Field sort3 = newField("sort3", "", StringField.TYPE_UNSTORED);
       doc.add(sort3);
       docNoGroup.add(sort3);
-      Field content = newField("content", "", Field.Index.ANALYZED);
+      Field content = newField("content", "", TextField.TYPE_UNSTORED);
       doc.add(content);
       docNoGroup.add(content);
       NumericField id = new NumericField("id");

Modified: lucene/dev/trunk/modules/grouping/src/test/org/apache/lucene/search/grouping/TermAllGroupsCollectorTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/grouping/src/test/org/apache/lucene/search/grouping/TermAllGroupsCollectorTest.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/modules/grouping/src/test/org/apache/lucene/search/grouping/TermAllGroupsCollectorTest.java (original)
+++ lucene/dev/trunk/modules/grouping/src/test/org/apache/lucene/search/grouping/TermAllGroupsCollectorTest.java Sat Aug 27 13:27:01 2011
@@ -5,7 +5,7 @@ package org.apache.lucene.search.groupin
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
  * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
+ * (the "License")); you may not use this file except in compliance with
  * the License.  You may obtain a copy of the License at
  *
  *     http://www.apache.org/licenses/LICENSE-2.0
@@ -20,6 +20,8 @@ package org.apache.lucene.search.groupin
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.IndexSearcher;
@@ -32,6 +34,8 @@ public class TermAllGroupsCollectorTest 
   public void testTotalGroupCount() throws Exception {
 
     final String groupField = "author";
+    FieldType customType = new FieldType();
+    customType.setStored(true);
 
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(
@@ -41,51 +45,51 @@ public class TermAllGroupsCollectorTest 
                                                     new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
     // 0
     Document doc = new Document();
-    doc.add(new Field(groupField, "author1", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("content", "random text", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("id", "1", Field.Store.YES, Field.Index.NO));
+    doc.add(new Field(groupField, TextField.TYPE_STORED, "author1"));
+    doc.add(new Field("content", TextField.TYPE_STORED, "random text"));
+    doc.add(new Field("id", customType, "1"));
     w.addDocument(doc);
 
     // 1
     doc = new Document();
-    doc.add(new Field(groupField, "author1", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("content", "some more random text blob", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("id", "2", Field.Store.YES, Field.Index.NO));
+    doc.add(new Field(groupField, TextField.TYPE_STORED, "author1"));
+    doc.add(new Field("content", TextField.TYPE_STORED, "some more random text blob"));
+    doc.add(new Field("id", customType, "2"));
     w.addDocument(doc);
 
     // 2
     doc = new Document();
-    doc.add(new Field(groupField, "author1", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("content", "some more random textual data", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("id", "3", Field.Store.YES, Field.Index.NO));
+    doc.add(new Field(groupField, TextField.TYPE_STORED, "author1"));
+    doc.add(new Field("content", TextField.TYPE_STORED, "some more random textual data"));
+    doc.add(new Field("id", customType, "3"));
     w.addDocument(doc);
     w.commit(); // To ensure a second segment
 
     // 3
     doc = new Document();
-    doc.add(new Field(groupField, "author2", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("content", "some random text", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("id", "4", Field.Store.YES, Field.Index.NO));
+    doc.add(new Field(groupField, TextField.TYPE_STORED, "author2"));
+    doc.add(new Field("content", TextField.TYPE_STORED, "some random text"));
+    doc.add(new Field("id", customType, "4"));
     w.addDocument(doc);
 
     // 4
     doc = new Document();
-    doc.add(new Field(groupField, "author3", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("content", "some more random text", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("id", "5", Field.Store.YES, Field.Index.NO));
+    doc.add(new Field(groupField, TextField.TYPE_STORED, "author3"));
+    doc.add(new Field("content", TextField.TYPE_STORED, "some more random text"));
+    doc.add(new Field("id", customType, "5"));
     w.addDocument(doc);
 
     // 5
     doc = new Document();
-    doc.add(new Field(groupField, "author3", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("content", "random blob", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("id", "6", Field.Store.YES, Field.Index.NO));
+    doc.add(new Field(groupField, TextField.TYPE_STORED, "author3"));
+    doc.add(new Field("content", TextField.TYPE_STORED, "random blob"));
+    doc.add(new Field("id", customType, "6"));
     w.addDocument(doc);
 
     // 6 -- no author field
     doc = new Document();
-    doc.add(new Field("content", "random word stuck in alot of other text", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("id", "6", Field.Store.YES, Field.Index.NO));
+    doc.add(new Field("content", TextField.TYPE_STORED, "random word stuck in alot of other text"));
+    doc.add(new Field("id", customType, "6"));
     w.addDocument(doc);
 
     IndexSearcher indexSearcher = new IndexSearcher(w.getReader());

Modified: lucene/dev/trunk/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java (original)
+++ lucene/dev/trunk/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java Sat Aug 27 13:27:01 2011
@@ -23,8 +23,11 @@ import java.util.*;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.NumericField;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
@@ -47,6 +50,9 @@ public class TestGrouping extends Lucene
 
     final String groupField = "author";
 
+    FieldType customType = new FieldType();
+    customType.setStored(true);
+    
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(
                                random,
@@ -55,50 +61,50 @@ public class TestGrouping extends Lucene
                                                     new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
     // 0
     Document doc = new Document();
-    doc.add(new Field(groupField, "author1", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("content", "random text", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("id", "1", Field.Store.YES, Field.Index.NO));
+    doc.add(new Field(groupField, TextField.TYPE_STORED, "author1"));
+    doc.add(new Field("content", TextField.TYPE_STORED, "random text"));
+    doc.add(new Field("id", customType, "1"));
     w.addDocument(doc);
 
     // 1
     doc = new Document();
-    doc.add(new Field(groupField, "author1", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("content", "some more random text", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("id", "2", Field.Store.YES, Field.Index.NO));
+    doc.add(new Field(groupField, TextField.TYPE_STORED, "author1"));
+    doc.add(new Field("content", TextField.TYPE_STORED, "some more random text"));
+    doc.add(new Field("id", customType, "2"));
     w.addDocument(doc);
 
     // 2
     doc = new Document();
-    doc.add(new Field(groupField, "author1", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("content", "some more random textual data", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("id", "3", Field.Store.YES, Field.Index.NO));
+    doc.add(new Field(groupField, TextField.TYPE_STORED, "author1"));
+    doc.add(new Field("content", TextField.TYPE_STORED, "some more random textual data"));
+    doc.add(new Field("id", customType, "3"));
     w.addDocument(doc);
 
     // 3
     doc = new Document();
-    doc.add(new Field(groupField, "author2", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("content", "some random text", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("id", "4", Field.Store.YES, Field.Index.NO));
+    doc.add(new Field(groupField, TextField.TYPE_STORED, "author2"));
+    doc.add(new Field("content", TextField.TYPE_STORED, "some random text"));
+    doc.add(new Field("id", customType, "4"));
     w.addDocument(doc);
 
     // 4
     doc = new Document();
-    doc.add(new Field(groupField, "author3", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("content", "some more random text", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("id", "5", Field.Store.YES, Field.Index.NO));
+    doc.add(new Field(groupField, TextField.TYPE_STORED, "author3"));
+    doc.add(new Field("content", TextField.TYPE_STORED, "some more random text"));
+    doc.add(new Field("id", customType, "5"));
     w.addDocument(doc);
 
     // 5
     doc = new Document();
-    doc.add(new Field(groupField, "author3", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("content", "random", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("id", "6", Field.Store.YES, Field.Index.NO));
+    doc.add(new Field(groupField, TextField.TYPE_STORED, "author3"));
+    doc.add(new Field("content", TextField.TYPE_STORED, "random"));
+    doc.add(new Field("id", customType, "6"));
     w.addDocument(doc);
 
     // 6 -- no author field
     doc = new Document();
-    doc.add(new Field("content", "random word stuck in alot of other text", Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(new Field("id", "6", Field.Store.YES, Field.Index.NO));
+    doc.add(new Field("content", TextField.TYPE_STORED,  "random word stuck in alot of other text"));
+    doc.add(new Field("id", customType, "6"));
     w.addDocument(doc);
 
     IndexSearcher indexSearcher = new IndexSearcher(w.getReader());
@@ -386,18 +392,19 @@ public class TestGrouping extends Lucene
         Document doc = new Document();
         docs.add(doc);
         if (groupValue.group != null) {
-          doc.add(newField("group", groupValue.group.utf8ToString(), Field.Index.NOT_ANALYZED));
+          doc.add(newField("group", groupValue.group.utf8ToString(), StringField.TYPE_UNSTORED));
         }
-        doc.add(newField("sort1", groupValue.sort1.utf8ToString(), Field.Index.NOT_ANALYZED));
-        doc.add(newField("sort2", groupValue.sort2.utf8ToString(), Field.Index.NOT_ANALYZED));
+        doc.add(newField("sort1", groupValue.sort1.utf8ToString(), StringField.TYPE_UNSTORED));
+        doc.add(newField("sort2", groupValue.sort2.utf8ToString(), StringField.TYPE_UNSTORED));
         doc.add(new NumericField("id").setIntValue(groupValue.id));
-        doc.add(newField("content", groupValue.content, Field.Index.ANALYZED));
+        doc.add(newField("content", groupValue.content, TextField.TYPE_UNSTORED));
         //System.out.println("TEST:     doc content=" + groupValue.content + " group=" + (groupValue.group == null ? "null" : groupValue.group.utf8ToString()) + " sort1=" + groupValue.sort1.utf8ToString() + " id=" + groupValue.id);
       }
       // So we can pull filter marking last doc in block:
-      final Field groupEnd = newField("groupend", "x", Field.Index.NOT_ANALYZED);
-      groupEnd.setIndexOptions(IndexOptions.DOCS_ONLY);
-      groupEnd.setOmitNorms(true);
+      FieldType ft = new FieldType(StringField.TYPE_UNSTORED);
+      ft.setIndexOptions(IndexOptions.DOCS_ONLY);
+      ft.setOmitNorms(true);
+      final Field groupEnd = newField("groupend", "x", ft);
       docs.get(docs.size()-1).add(groupEnd);
       // Add as a doc block:
       w.addDocuments(docs);
@@ -497,15 +504,15 @@ public class TestGrouping extends Lucene
 
       Document doc = new Document();
       Document docNoGroup = new Document();
-      Field group = newField("group", "", Field.Index.NOT_ANALYZED);
+      Field group = newField("group", "", StringField.TYPE_UNSTORED);
       doc.add(group);
-      Field sort1 = newField("sort1", "", Field.Index.NOT_ANALYZED);
+      Field sort1 = newField("sort1", "", StringField.TYPE_UNSTORED);
       doc.add(sort1);
       docNoGroup.add(sort1);
-      Field sort2 = newField("sort2", "", Field.Index.NOT_ANALYZED);
+      Field sort2 = newField("sort2", "", StringField.TYPE_UNSTORED);
       doc.add(sort2);
       docNoGroup.add(sort2);
-      Field content = newField("content", "", Field.Index.ANALYZED);
+      Field content = newField("content", "", TextField.TYPE_UNSTORED);
       doc.add(content);
       docNoGroup.add(content);
       NumericField id = new NumericField("id");

Modified: lucene/dev/trunk/modules/join/src/test/org/apache/lucene/search/TestBlockJoin.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/join/src/test/org/apache/lucene/search/TestBlockJoin.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/modules/join/src/test/org/apache/lucene/search/TestBlockJoin.java (original)
+++ lucene/dev/trunk/modules/join/src/test/org/apache/lucene/search/TestBlockJoin.java Sat Aug 27 13:27:01 2011
@@ -24,6 +24,7 @@ import java.util.List;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.NumericField;
+import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
@@ -42,16 +43,16 @@ public class TestBlockJoin extends Lucen
   // One resume...
   private Document makeResume(String name, String country) {
     Document resume = new Document();
-    resume.add(newField("docType", "resume", Field.Index.NOT_ANALYZED));
-    resume.add(newField("name", name, Field.Store.YES, Field.Index.NOT_ANALYZED));
-    resume.add(newField("country", country, Field.Index.NOT_ANALYZED));
+    resume.add(newField("docType", "resume", StringField.TYPE_UNSTORED));
+    resume.add(newField("name", name, StringField.TYPE_STORED));
+    resume.add(newField("country", country, StringField.TYPE_UNSTORED));
     return resume;
   }
 
   // ... has multiple jobs
   private Document makeJob(String skill, int year) {
     Document job = new Document();
-    job.add(newField("skill", skill, Field.Store.YES, Field.Index.NOT_ANALYZED));
+    job.add(newField("skill", skill, StringField.TYPE_STORED));
     job.add(new NumericField("year").setIntValue(year));
     return job;
   }
@@ -188,15 +189,15 @@ public class TestBlockJoin extends Lucen
     for(int parentDocID=0;parentDocID<numParentDocs;parentDocID++) {
       Document parentDoc = new Document();
       Document parentJoinDoc = new Document();
-      Field id = newField("parentID", ""+parentDocID, Field.Store.YES, Field.Index.NOT_ANALYZED);
+      Field id = newField("parentID", ""+parentDocID, StringField.TYPE_STORED);
       parentDoc.add(id);
       parentJoinDoc.add(id);
-      parentJoinDoc.add(newField("isParent", "x", Field.Index.NOT_ANALYZED));
+      parentJoinDoc.add(newField("isParent", "x", StringField.TYPE_UNSTORED));
       for(int field=0;field<parentFields.length;field++) {
         if (random.nextDouble() < 0.9) {
           Field f = newField("parent" + field,
                              parentFields[field][random.nextInt(parentFields[field].length)],
-                             Field.Index.NOT_ANALYZED);
+                             StringField.TYPE_UNSTORED);
           parentDoc.add(f);
           parentJoinDoc.add(f);
         }
@@ -215,7 +216,7 @@ public class TestBlockJoin extends Lucen
         Document joinChildDoc = new Document();
         joinDocs.add(joinChildDoc);
 
-        Field childID = newField("childID", ""+childDocID, Field.Store.YES, Field.Index.NOT_ANALYZED);
+        Field childID = newField("childID", ""+childDocID, StringField.TYPE_STORED);
         childDoc.add(childID);
         joinChildDoc.add(childID);
 
@@ -223,7 +224,7 @@ public class TestBlockJoin extends Lucen
           if (random.nextDouble() < 0.9) {
             Field f = newField("child" + childFieldID,
                                childFields[childFieldID][random.nextInt(childFields[childFieldID].length)],
-                               Field.Index.NOT_ANALYZED);
+                               StringField.TYPE_UNSTORED);
             childDoc.add(f);
             joinChildDoc.add(f);
           }

Modified: lucene/dev/trunk/modules/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/modules/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java (original)
+++ lucene/dev/trunk/modules/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java Sat Aug 27 13:27:01 2011
@@ -20,16 +20,15 @@ import org.apache.lucene.analysis.TokenS
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermFreqVector;
 import org.apache.lucene.search.*;
-import org.apache.lucene.store.FSDirectory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.PriorityQueue;
 
 import java.io.*;
-import java.net.URL;
 import java.util.*;
 
 
@@ -705,10 +704,13 @@ public final class MoreLikeThis {
       // field does not store term vector info
       if (vector == null) {
         Document d = ir.document(docNum);
-        String text[] = d.getValues(fieldName);
-        if (text != null) {
-          for (int j = 0; j < text.length; j++) {
-            addTermFrequencies(new StringReader(text[j]), termFreqMap, fieldName);
+        IndexableField fields[] = d.getFields(fieldName);
+        if (fields != null) {
+          for (int j = 0; j < fields.length; j++) {
+            final String stringValue = fields[j].stringValue();
+            if (stringValue != null) {
+              addTermFrequencies(new StringReader(stringValue), termFreqMap, fieldName);
+            }
           }
         }
       } else {

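The MoreLikeThis hunk above replaces Document.getValues(fieldName), which returned a String[], with getFields(fieldName), iterating IndexableField instances and skipping any whose stringValue() is null. A condensed sketch of that loop on its own, with the term-frequency accumulation left out since it is internal to MoreLikeThis:

    import java.io.StringReader;
    import java.util.ArrayList;
    import java.util.List;

    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.IndexableField;

    class FieldTextSketch {
      // Collects the plain-text values of one field, mirroring the new getFields() loop.
      static List<StringReader> readersFor(Document d, String fieldName) {
        List<StringReader> readers = new ArrayList<StringReader>();
        IndexableField[] fields = d.getFields(fieldName);
        if (fields != null) {
          for (IndexableField field : fields) {
            String value = field.stringValue();
            if (value != null) { // skip fields without a string value
              readers.add(new StringReader(value));
            }
          }
        }
        return readers;
      }
    }
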
Modified: lucene/dev/trunk/modules/queries/src/test/org/apache/lucene/queries/BooleanFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/queries/src/test/org/apache/lucene/queries/BooleanFilterTest.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/modules/queries/src/test/org/apache/lucene/queries/BooleanFilterTest.java (original)
+++ lucene/dev/trunk/modules/queries/src/test/org/apache/lucene/queries/BooleanFilterTest.java Sat Aug 27 13:27:01 2011
@@ -20,7 +20,7 @@ package org.apache.lucene.queries;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -64,10 +64,10 @@ public class BooleanFilterTest extends L
 
   private void addDoc(RandomIndexWriter writer, String accessRights, String price, String date, String inStock) throws IOException {
     Document doc = new Document();
-    doc.add(newField("accessRights", accessRights, Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(newField("price", price, Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(newField("date", date, Field.Store.YES, Field.Index.ANALYZED));
-    doc.add(newField("inStock", inStock, Field.Store.YES, Field.Index.ANALYZED));
+    doc.add(newField("accessRights", accessRights, TextField.TYPE_STORED));
+    doc.add(newField("price", price, TextField.TYPE_STORED));
+    doc.add(newField("date", date, TextField.TYPE_STORED));
+    doc.add(newField("inStock", inStock, TextField.TYPE_STORED));
     writer.addDocument(doc);
   }
 

Modified: lucene/dev/trunk/modules/queries/src/test/org/apache/lucene/queries/ChainedFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/queries/src/test/org/apache/lucene/queries/ChainedFilterTest.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/modules/queries/src/test/org/apache/lucene/queries/ChainedFilterTest.java (original)
+++ lucene/dev/trunk/modules/queries/src/test/org/apache/lucene/queries/ChainedFilterTest.java Sat Aug 27 13:27:01 2011
@@ -21,7 +21,7 @@ import java.util.Calendar;
 import java.util.GregorianCalendar;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
@@ -29,10 +29,10 @@ import org.apache.lucene.search.BooleanC
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.CachingWrapperFilter;
 import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.QueryWrapperFilter;
-import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TermRangeFilter;
 import org.apache.lucene.search.TopDocs;
@@ -62,9 +62,9 @@ public class ChainedFilterTest extends L
 
     for (int i = 0; i < MAX; i++) {
       Document doc = new Document();
-      doc.add(newField("key", "" + (i + 1), Field.Store.YES, Field.Index.NOT_ANALYZED));
-      doc.add(newField("owner", (i < MAX / 2) ? "bob" : "sue", Field.Store.YES, Field.Index.NOT_ANALYZED));
-      doc.add(newField("date", cal.getTime().toString(), Field.Store.YES, Field.Index.NOT_ANALYZED));
+      doc.add(newField("key", "" + (i + 1), StringField.TYPE_STORED));
+      doc.add(newField("owner", (i < MAX / 2) ? "bob" : "sue", StringField.TYPE_STORED));
+      doc.add(newField("date", cal.getTime().toString(), StringField.TYPE_STORED));
       writer.addDocument(doc);
 
       cal.add(Calendar.DATE, 1);

Modified: lucene/dev/trunk/modules/queries/src/test/org/apache/lucene/queries/TermsFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/queries/src/test/org/apache/lucene/queries/TermsFilterTest.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/modules/queries/src/test/org/apache/lucene/queries/TermsFilterTest.java (original)
+++ lucene/dev/trunk/modules/queries/src/test/org/apache/lucene/queries/TermsFilterTest.java Sat Aug 27 13:27:01 2011
@@ -17,8 +17,10 @@ package org.apache.lucene.queries;
  * limitations under the License.
  */
 
+import java.util.HashSet;
+
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -29,8 +31,6 @@ import org.apache.lucene.store.Directory
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.LuceneTestCase;
 
-import java.util.HashSet;
-
 public class TermsFilterTest extends LuceneTestCase {
 
   public void testCachability() throws Exception {
@@ -57,7 +57,7 @@ public class TermsFilterTest extends Luc
     for (int i = 0; i < 100; i++) {
       Document doc = new Document();
       int term = i * 10; //terms are units of 10;
-      doc.add(newField(fieldName, "" + term, Field.Store.YES, Field.Index.NOT_ANALYZED));
+      doc.add(newField(fieldName, "" + term, StringField.TYPE_STORED));
       w.addDocument(doc);
     }
     IndexReader reader = new SlowMultiReaderWrapper(w.getReader());

Modified: lucene/dev/trunk/modules/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/modules/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java (original)
+++ lucene/dev/trunk/modules/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java Sat Aug 27 13:27:01 2011
@@ -4,9 +4,10 @@ import org.apache.lucene.analysis.Analyz
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Fieldable;
-import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.queries.function.valuesource.ByteFieldSource;
 import org.apache.lucene.queries.function.valuesource.FloatFieldSource;
 import org.apache.lucene.queries.function.valuesource.IntFieldSource;
@@ -128,23 +129,25 @@ public abstract class FunctionTestSetup 
 
   private static void addDoc(RandomIndexWriter iw, int i) throws Exception {
     Document d = new Document();
-    Fieldable f;
+    Field f;
     int scoreAndID = i + 1;
 
-    f = newField(ID_FIELD, id2String(scoreAndID), Field.Store.YES, Field.Index.NOT_ANALYZED); // for debug purposes
-    f.setOmitNorms(true);
+    FieldType customType = new FieldType(TextField.TYPE_STORED);
+    customType.setTokenized(false);
+    customType.setOmitNorms(true);
+    
+    f = newField(ID_FIELD, id2String(scoreAndID), customType); // for debug purposes
     d.add(f);
 
-    f = newField(TEXT_FIELD, "text of doc" + scoreAndID + textLine(i), Field.Store.NO, Field.Index.ANALYZED); // for regular search
-    f.setOmitNorms(true);
+    FieldType customType2 = new FieldType(TextField.TYPE_UNSTORED);
+    customType2.setOmitNorms(true);
+    f = newField(TEXT_FIELD, "text of doc" + scoreAndID + textLine(i), customType2); // for regular search
     d.add(f);
 
-    f = newField(INT_FIELD, "" + scoreAndID, Field.Store.NO, Field.Index.NOT_ANALYZED); // for function scoring
-    f.setOmitNorms(true);
+    f = newField(INT_FIELD, "" + scoreAndID, customType); // for function scoring
     d.add(f);
 
-    f = newField(FLOAT_FIELD, scoreAndID + ".000", Field.Store.NO, Field.Index.NOT_ANALYZED); // for function scoring
-    f.setOmitNorms(true);
+    f = newField(FLOAT_FIELD, scoreAndID + ".000", customType); // for function scoring
     d.add(f);
 
     iw.addDocument(d);

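In FunctionTestSetup the per-field setOmitNorms(true) calls are replaced by FieldType instances configured once and reused across several fields (newField is the LuceneTestCase test helper; the sketch below uses the Field constructor directly). A sketch of that reuse, with made-up field names and values:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.FieldType;
    import org.apache.lucene.document.TextField;

    class ReusedFieldTypeSketch {
      static Document makeDoc(int scoreAndID) {
        Document d = new Document();

        // Stored, untokenized, no norms: shared by the id and numeric-as-text fields.
        FieldType keywordStored = new FieldType(TextField.TYPE_STORED);
        keywordStored.setTokenized(false);
        keywordStored.setOmitNorms(true);

        // Tokenized, unstored, no norms: for the searchable text field.
        FieldType textNoNorms = new FieldType(TextField.TYPE_UNSTORED);
        textNoNorms.setOmitNorms(true);

        d.add(new Field("id", keywordStored, "doc" + scoreAndID));
        d.add(new Field("text", textNoNorms, "text of doc " + scoreAndID));
        d.add(new Field("score", keywordStored, "" + scoreAndID));
        return d;
      }
    }
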
Modified: lucene/dev/trunk/modules/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/modules/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java (original)
+++ lucene/dev/trunk/modules/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java Sat Aug 27 13:27:01 2011
@@ -26,7 +26,7 @@ import java.util.Map;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.search.BooleanClause;
@@ -66,7 +66,7 @@ public class TestMoreLikeThis extends Lu
   
   private void addDoc(RandomIndexWriter writer, String text) throws IOException {
     Document doc = new Document();
-    doc.add(newField("text", text, Field.Store.YES, Field.Index.ANALYZED));
+    doc.add(newField("text", text, TextField.TYPE_STORED));
     writer.addDocument(doc);
   }
   

Modified: lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java (original)
+++ lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java Sat Aug 27 13:27:01 2011
@@ -26,7 +26,7 @@ import org.apache.lucene.analysis.Analyz
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.IndexSearcher;
@@ -284,7 +284,7 @@ public class TestMultiFieldQueryParser e
     Directory ramDir = newDirectory();
     IndexWriter iw =  new IndexWriter(ramDir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
     Document doc = new Document();
-    doc.add(newField("body", "blah the footest blah", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("body", "blah the footest blah", TextField.TYPE_UNSTORED));
     iw.addDocument(doc);
     iw.close();
     

Modified: lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestQueryParser.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestQueryParser.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestQueryParser.java (original)
+++ lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestQueryParser.java Sat Aug 27 13:27:01 2011
@@ -35,5 +35,4 @@ public class TestQueryParser extends Que
     qp.setDefaultOperator(QueryParser.OR_OPERATOR);
     return qp;
   }
-  
 }

Modified: lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java (original)
+++ lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java Sat Aug 27 13:27:01 2011
@@ -22,7 +22,7 @@ import java.util.HashSet;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.queryparser.classic.QueryParser;
 import org.apache.lucene.search.IndexSearcher;
@@ -115,10 +115,8 @@ public class TestComplexPhraseQuery exte
     IndexWriter w = new IndexWriter(rd, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
     for (int i = 0; i < docsContent.length; i++) {
       Document doc = new Document();
-      doc.add(newField("name", docsContent[i].name, Field.Store.YES,
-          Field.Index.ANALYZED));
-      doc.add(newField("id", docsContent[i].id, Field.Store.YES,
-          Field.Index.ANALYZED));
+      doc.add(newField("name", docsContent[i].name, TextField.TYPE_STORED));
+      doc.add(newField("id", docsContent[i].id, TextField.TYPE_STORED));
       w.addDocument(doc);
     }
     w.close();

Modified: lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java (original)
+++ lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java Sat Aug 27 13:27:01 2011
@@ -25,11 +25,10 @@ import org.apache.lucene.analysis.Analyz
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
 import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler;
-import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler.Operator;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
@@ -321,8 +320,7 @@ public class TestMultiFieldQPHelper exte
     Directory ramDir = newDirectory();
     IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
     Document doc = new Document();
-    doc.add(newField("body", "blah the footest blah", Field.Store.NO,
-        Field.Index.ANALYZED));
+    doc.add(newField("body", "blah the footest blah", TextField.TYPE_UNSTORED));
     iw.addDocument(doc);
     iw.close();
 

Modified: lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestNumericQueryParser.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestNumericQueryParser.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestNumericQueryParser.java (original)
+++ lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestNumericQueryParser.java Sat Aug 27 13:27:01 2011
@@ -33,7 +33,6 @@ import java.util.TimeZone;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.document.NumericField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -194,8 +193,7 @@ public class TestNumericQueryParser exte
       numericConfigMap.put(type.name(), new NumericConfig(PRECISION_STEP,
           NUMBER_FORMAT, type));
       
-      NumericField field = new NumericField(type.name(), PRECISION_STEP,
-          Field.Store.YES, true);
+      NumericField field = new NumericField(type.name(), PRECISION_STEP, NumericField.TYPE_STORED);
       
       numericFieldMap.put(type.name(), field);
       doc.add(field);
@@ -204,8 +202,7 @@ public class TestNumericQueryParser exte
     
     numericConfigMap.put(DATE_FIELD_NAME, new NumericConfig(PRECISION_STEP,
         DATE_FORMAT, NumericField.DataType.LONG));
-    NumericField dateField = new NumericField(DATE_FIELD_NAME, PRECISION_STEP,
-        Field.Store.YES, true);
+    NumericField dateField = new NumericField(DATE_FIELD_NAME, PRECISION_STEP, NumericField.TYPE_STORED);
     numericFieldMap.put(DATE_FIELD_NAME, dateField);
     doc.add(dateField);
     

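These two hunks switch NumericField construction from the old (precisionStep, Field.Store.YES, true) form to a FieldType constant. A stand-alone version of a stored, trie-indexed numeric field might look like the sketch below; the field name and precision step are illustrative, and setLongValue(...) is assumed to be the pre-existing NumericField setter, untouched by this commit.

import org.apache.lucene.document.Document;
import org.apache.lucene.document.NumericField;

// Sketch of a stored numeric field using the new constructor form.
public class NumericFieldSketch {
  public static void main(String[] args) {
    final int precisionStep = 8; // illustrative, not the test's PRECISION_STEP
    NumericField date = new NumericField("date", precisionStep, NumericField.TYPE_STORED);
    date.setLongValue(System.currentTimeMillis()); // assumed pre-existing setter
    Document doc = new Document();
    doc.add(date);
    System.out.println(doc);
  }
}
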
Modified: lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java (original)
+++ lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java Sat Aug 27 13:27:01 2011
@@ -34,7 +34,7 @@ import org.apache.lucene.analysis.tokena
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.document.DateTools;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
@@ -1252,7 +1252,7 @@ public class TestQPHelper extends Lucene
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new CannedAnalyzer()));
     Document doc = new Document();
-    doc.add(newField("field", "", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("field", "", TextField.TYPE_UNSTORED));
     w.addDocument(doc);
     IndexReader r = IndexReader.open(w, true);
     IndexSearcher s = newSearcher(r);

Modified: lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/surround/query/SingleFieldTestDb.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/surround/query/SingleFieldTestDb.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/surround/query/SingleFieldTestDb.java (original)
+++ lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/surround/query/SingleFieldTestDb.java Sat Aug 27 13:27:01 2011
@@ -25,7 +25,7 @@ import org.apache.lucene.store.RAMDirect
 import org.apache.lucene.util.Version;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 
@@ -44,7 +44,7 @@ public class SingleFieldTestDb {
           new MockAnalyzer(random)));
       for (int j = 0; j < docs.length; j++) {
         Document d = new Document();
-        d.add(new Field(fieldName, docs[j], Field.Store.NO, Field.Index.ANALYZED));
+        d.add(new TextField(fieldName, docs[j]));
         writer.addDocument(d);
       }
       writer.close();

Modified: lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java (original)
+++ lucene/dev/trunk/modules/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java Sat Aug 27 13:27:01 2011
@@ -32,15 +32,15 @@ import org.apache.lucene.analysis.MockTo
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.document.DateTools;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.queryparser.classic.CharStream;
 import org.apache.lucene.queryparser.classic.ParseException;
 import org.apache.lucene.queryparser.classic.QueryParser;
@@ -1092,7 +1092,7 @@ public abstract class QueryParserTestBas
     Analyzer a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, a));
     Document doc = new Document();
-    doc.add(newField("f", "the wizard of ozzy", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("f", "the wizard of ozzy", TextField.TYPE_UNSTORED));
     w.addDocument(doc);
     IndexReader r = IndexReader.open(w, true);
     w.close();

Modified: lucene/dev/trunk/modules/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/modules/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java (original)
+++ lucene/dev/trunk/modules/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java Sat Aug 27 13:27:01 2011
@@ -18,17 +18,16 @@ package org.apache.lucene.search.spell;
  */
 
 import java.io.IOException;
-import java.io.Reader;
 import java.util.ArrayList;
 import java.util.Comparator;
 import java.util.Iterator;
 import java.util.List;
 
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
@@ -607,9 +606,7 @@ public class SpellChecker implements jav
     Document doc = new Document();
     // the word field is never queried on... its indexed so it can be quickly
     // checked for rebuild (and stored for retrieval). Doesn't need norms or TF/pos
-    Field f = new Field(F_WORD, text, Field.Store.YES, Field.Index.NOT_ANALYZED);
-    f.setIndexOptions(IndexOptions.DOCS_ONLY);
-    f.setOmitNorms(true);
+    Field f = new Field(F_WORD, StringField.TYPE_STORED, text);
     doc.add(f); // orig term
     addGram(text, doc, ng1, ng2);
     return doc;
@@ -622,25 +619,22 @@ public class SpellChecker implements jav
       String end = null;
       for (int i = 0; i < len - ng + 1; i++) {
         String gram = text.substring(i, i + ng);
-        Field ngramField = new Field(key, gram, Field.Store.NO, Field.Index.NOT_ANALYZED);
+        FieldType ft = new FieldType(StringField.TYPE_UNSTORED);
+        ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
+        Field ngramField = new Field(key, ft, gram);
         // spellchecker does not use positional queries, but we want freqs
         // for scoring these multivalued n-gram fields.
-        ngramField.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
         doc.add(ngramField);
         if (i == 0) {
           // only one term possible in the startXXField, TF/pos and norms aren't needed.
-          Field startField = new Field("start" + ng, gram, Field.Store.NO, Field.Index.NOT_ANALYZED);
-          startField.setIndexOptions(IndexOptions.DOCS_ONLY);
-          startField.setOmitNorms(true);
+          Field startField = new StringField("start" + ng, gram);
           doc.add(startField);
         }
         end = gram;
       }
       if (end != null) { // may not be present if len==ng1
         // only one term possible in the endXXField, TF/pos and norms aren't needed.
-        Field endField = new Field("end" + ng, end, Field.Store.NO, Field.Index.NOT_ANALYZED);
-        endField.setIndexOptions(IndexOptions.DOCS_ONLY);
-        endField.setOmitNorms(true);
+        Field endField = new StringField("end" + ng, end);
         doc.add(endField);
       }
     }

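The SpellChecker change above is the most instructive one in this batch: per-field settings that used to be toggled on the Field instance (setIndexOptions, setOmitNorms) now live on a FieldType, and the common cases collapse into StringField. A condensed sketch of the two patterns in that hunk, with hypothetical field names and values:

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.FieldInfo.IndexOptions;

// Sketch of the spellchecker-style field setup after this commit.
public class NgramFieldSketch {
  public static void main(String[] args) {
    Document doc = new Document();

    // Multivalued n-gram field: frequencies are wanted for scoring, so copy the
    // unstored keyword type and raise its index options to DOCS_AND_FREQS.
    FieldType gramType = new FieldType(StringField.TYPE_UNSTORED);
    gramType.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
    doc.add(new Field("gram3", gramType, "foo"));

    // Single-term start/end fields: plain StringField is enough, since (as the
    // replaced lines show) it indexes docs-only and omits norms already.
    doc.add(new StringField("start3", "foo"));

    System.out.println(doc);
  }
}
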
Modified: lucene/dev/trunk/modules/suggest/src/test/org/apache/lucene/search/spell/TestDirectSpellChecker.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/suggest/src/test/org/apache/lucene/search/spell/TestDirectSpellChecker.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/modules/suggest/src/test/org/apache/lucene/search/spell/TestDirectSpellChecker.java (original)
+++ lucene/dev/trunk/modules/suggest/src/test/org/apache/lucene/search/spell/TestDirectSpellChecker.java Sat Aug 27 13:27:01 2011
@@ -20,7 +20,7 @@ package org.apache.lucene.search.spell;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
@@ -39,7 +39,7 @@ public class TestDirectSpellChecker exte
 
     for (int i = 0; i < 20; i++) {
       Document doc = new Document();
-      doc.add(newField("numbers", English.intToEnglish(i), Field.Store.NO, Field.Index.ANALYZED));
+      doc.add(newField("numbers", English.intToEnglish(i), TextField.TYPE_UNSTORED));
       writer.addDocument(doc);
     }
 
@@ -73,7 +73,7 @@ public class TestDirectSpellChecker exte
     // add some more documents
     for (int i = 1000; i < 1100; i++) {
       Document doc = new Document();
-      doc.add(newField("numbers", English.intToEnglish(i), Field.Store.NO, Field.Index.ANALYZED));
+      doc.add(newField("numbers", English.intToEnglish(i), TextField.TYPE_UNSTORED));
       writer.addDocument(doc);
     }
 
@@ -96,13 +96,13 @@ public class TestDirectSpellChecker exte
         new MockAnalyzer(random, MockTokenizer.SIMPLE, true));
 
     Document doc = new Document();
-    doc.add(newField("text", "foobar", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("text", "foobar", TextField.TYPE_UNSTORED));
     writer.addDocument(doc);
-    doc.add(newField("text", "foobar", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("text", "foobar", TextField.TYPE_UNSTORED));
     writer.addDocument(doc);
-    doc.add(newField("text", "foobaz", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("text", "foobaz", TextField.TYPE_UNSTORED));
     writer.addDocument(doc);
-    doc.add(newField("text", "fobar", Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(newField("text", "fobar", TextField.TYPE_UNSTORED));
     writer.addDocument(doc);
    
     IndexReader ir = writer.getReader();
@@ -150,7 +150,7 @@ public class TestDirectSpellChecker exte
 
     for (int i = 0; i < 20; i++) {
       Document doc = new Document();
-      doc.add(newField("numbers", English.intToEnglish(i), Field.Store.NO, Field.Index.ANALYZED));
+      doc.add(newField("numbers", English.intToEnglish(i), TextField.TYPE_UNSTORED));
       writer.addDocument(doc);
     }
 

Modified: lucene/dev/trunk/modules/suggest/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/suggest/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/modules/suggest/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java (original)
+++ lucene/dev/trunk/modules/suggest/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java Sat Aug 27 13:27:01 2011
@@ -23,7 +23,7 @@ import java.util.Iterator;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.store.Directory;
@@ -51,23 +51,23 @@ public class TestLuceneDictionary extend
     Document doc;
 
     doc = new  Document();
-    doc.add(newField("aaa", "foo", Field.Store.YES, Field.Index.ANALYZED));
+    doc.add(newField("aaa", "foo", TextField.TYPE_STORED));
     writer.addDocument(doc);
 
     doc = new  Document();
-    doc.add(newField("aaa", "foo", Field.Store.YES, Field.Index.ANALYZED));
+    doc.add(newField("aaa", "foo", TextField.TYPE_STORED));
     writer.addDocument(doc);
 
     doc = new  Document();
-    doc.add(new  Field("contents", "Tom", Field.Store.YES, Field.Index.ANALYZED));
+    doc.add(newField("contents", "Tom", TextField.TYPE_STORED));
     writer.addDocument(doc);
 
     doc = new  Document();
-    doc.add(new  Field("contents", "Jerry", Field.Store.YES, Field.Index.ANALYZED));
+    doc.add(newField("contents", "Jerry", TextField.TYPE_STORED));
     writer.addDocument(doc);
 
     doc = new Document();
-    doc.add(newField("zzz", "bar", Field.Store.YES, Field.Index.ANALYZED));
+    doc.add(newField("zzz", "bar", TextField.TYPE_STORED));
     writer.addDocument(doc);
 
     writer.optimize();

Modified: lucene/dev/trunk/modules/suggest/src/test/org/apache/lucene/search/spell/TestSpellChecker.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/suggest/src/test/org/apache/lucene/search/spell/TestSpellChecker.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/modules/suggest/src/test/org/apache/lucene/search/spell/TestSpellChecker.java (original)
+++ lucene/dev/trunk/modules/suggest/src/test/org/apache/lucene/search/spell/TestSpellChecker.java Sat Aug 27 13:27:01 2011
@@ -28,7 +28,7 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
@@ -58,9 +58,9 @@ public class TestSpellChecker extends Lu
 
     for (int i = 0; i < 1000; i++) {
       Document doc = new Document();
-      doc.add(newField("field1", English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED));
-      doc.add(newField("field2", English.intToEnglish(i + 1), Field.Store.YES, Field.Index.ANALYZED)); // + word thousand
-      doc.add(newField("field3", "fvei" + (i % 2 == 0 ? " five" : ""), Field.Store.YES, Field.Index.ANALYZED)); // + word thousand
+      doc.add(newField("field1", English.intToEnglish(i), TextField.TYPE_STORED));
+      doc.add(newField("field2", English.intToEnglish(i + 1), TextField.TYPE_STORED)); // + word thousand
+      doc.add(newField("field3", "fvei" + (i % 2 == 0 ? " five" : ""), TextField.TYPE_STORED)); // + word thousand
       writer.addDocument(doc);
     }
     writer.close();

Modified: lucene/dev/trunk/solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java (original)
+++ lucene/dev/trunk/solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java Sat Aug 27 13:27:01 2011
@@ -27,7 +27,7 @@ import org.apache.lucene.analysis.Analyz
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
 import org.apache.lucene.collation.ICUCollationKeyAnalyzer;
-import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.TermRangeQuery;
@@ -164,7 +164,7 @@ public class ICUCollationField extends F
   }
 
   @Override
-  public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException {
+  public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException {
     writer.writeStr(name, f.stringValue(), true);
   }
 

Modified: lucene/dev/trunk/solr/core/src/java/org/apache/solr/handler/MoreLikeThisHandler.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/java/org/apache/solr/handler/MoreLikeThisHandler.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/java/org/apache/solr/handler/MoreLikeThisHandler.java (original)
+++ lucene/dev/trunk/solr/core/src/java/org/apache/solr/handler/MoreLikeThisHandler.java Sat Aug 27 13:27:01 2011
@@ -353,7 +353,7 @@ public class MoreLikeThisHandler extends
       realMLTQuery = new BooleanQuery();
       realMLTQuery.add(boostedMLTQuery, BooleanClause.Occur.MUST);
       realMLTQuery.add(
-          new TermQuery(new Term(uniqueKeyField.getName(), uniqueKeyField.getType().storedToIndexed(doc.getFieldable(uniqueKeyField.getName())))), 
+          new TermQuery(new Term(uniqueKeyField.getName(), uniqueKeyField.getType().storedToIndexed(doc.getField(uniqueKeyField.getName())))), 
             BooleanClause.Occur.MUST_NOT);
       
       DocListAndSet results = new DocListAndSet();

Modified: lucene/dev/trunk/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java (original)
+++ lucene/dev/trunk/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java Sat Aug 27 13:27:01 2011
@@ -34,9 +34,9 @@ import org.slf4j.LoggerFactory;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Fieldable;
-import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.FieldsEnum;
@@ -163,21 +163,21 @@ public class LukeRequestHandler extends 
   
 
   /**
-   * @return a string representing a Fieldable's flags.  
+   * @return a string representing a IndexableField's flags.  
    */
-  private static String getFieldFlags( Fieldable f )
+  private static String getFieldFlags( IndexableField f )
   {
     StringBuilder flags = new StringBuilder();
-    flags.append( (f != null && f.isIndexed())                     ? FieldFlag.INDEXED.getAbbreviation() : '-' );
-    flags.append( (f != null && f.isTokenized())                   ? FieldFlag.TOKENIZED.getAbbreviation() : '-' );
-    flags.append( (f != null && f.isStored())                      ? FieldFlag.STORED.getAbbreviation() : '-' );
+    flags.append( (f != null && f.indexed())                     ? FieldFlag.INDEXED.getAbbreviation() : '-' );
+    flags.append( (f != null && f.tokenized())                   ? FieldFlag.TOKENIZED.getAbbreviation() : '-' );
+    flags.append( (f != null && f.stored())                      ? FieldFlag.STORED.getAbbreviation() : '-' );
     flags.append( (false)                                          ? FieldFlag.MULTI_VALUED.getAbbreviation() : '-' ); // SchemaField Specific
-    flags.append( (f != null && f.isTermVectorStored())            ? FieldFlag.TERM_VECTOR_STORED.getAbbreviation() : '-' );
-    flags.append( (f != null && f.isStoreOffsetWithTermVector())   ? FieldFlag.TERM_VECTOR_OFFSET.getAbbreviation() : '-' );
-    flags.append( (f != null && f.isStorePositionWithTermVector()) ? FieldFlag.TERM_VECTOR_POSITION.getAbbreviation() : '-' );
-    flags.append( (f != null && f.getOmitNorms())                  ? FieldFlag.OMIT_NORMS.getAbbreviation() : '-' );
-    flags.append( (f != null && f.isLazy())                        ? FieldFlag.LAZY.getAbbreviation() : '-' );
-    flags.append( (f != null && f.isBinary())                      ? FieldFlag.BINARY.getAbbreviation() : '-' );
+    flags.append( (f != null && f.storeTermVectors())            ? FieldFlag.TERM_VECTOR_STORED.getAbbreviation() : '-' );
+    flags.append( (f != null && f.storeTermVectorOffsets())   ? FieldFlag.TERM_VECTOR_OFFSET.getAbbreviation() : '-' );
+    flags.append( (f != null && f.storeTermVectorPositions()) ? FieldFlag.TERM_VECTOR_POSITION.getAbbreviation() : '-' );
+    flags.append( (f != null && f.omitNorms())                  ? FieldFlag.OMIT_NORMS.getAbbreviation() : '-' );
+    flags.append( (f != null && f.getClass().getSimpleName().equals("LazyField")) ? FieldFlag.LAZY.getAbbreviation() : '-' );
+    flags.append( (f != null && f.binaryValue()!=null)                      ? FieldFlag.BINARY.getAbbreviation() : '-' );
     flags.append( (false)                                          ? FieldFlag.SORT_MISSING_FIRST.getAbbreviation() : '-' ); // SchemaField Specific
     flags.append( (false)                                          ? FieldFlag.SORT_MISSING_LAST.getAbbreviation() : '-' ); // SchemaField Specific
     return flags.toString();
@@ -239,34 +239,34 @@ public class LukeRequestHandler extends 
     final CharsRef spare = new CharsRef();
     SimpleOrderedMap<Object> finfo = new SimpleOrderedMap<Object>();
     for( Object o : doc.getFields() ) {
-      Fieldable fieldable = (Fieldable)o;
+      Field field = (Field)o;
       SimpleOrderedMap<Object> f = new SimpleOrderedMap<Object>();
       
-      SchemaField sfield = schema.getFieldOrNull( fieldable.name() );
+      SchemaField sfield = schema.getFieldOrNull( field.name() );
       FieldType ftype = (sfield==null)?null:sfield.getType();
 
       f.add( "type", (ftype==null)?null:ftype.getTypeName() );
       f.add( "schema", getFieldFlags( sfield ) );
-      f.add( "flags", getFieldFlags( fieldable ) );
+      f.add( "flags", getFieldFlags( field ) );
 
-      Term t = new Term(fieldable.name(), ftype!=null ? ftype.storedToIndexed(fieldable) : fieldable.stringValue());
+      Term t = new Term(field.name(), ftype!=null ? ftype.storedToIndexed(field) : field.stringValue());
 
-      f.add( "value", (ftype==null)?null:ftype.toExternal( fieldable ) );
+      f.add( "value", (ftype==null)?null:ftype.toExternal( field ) );
 
       // TODO: this really should be "stored"
-      f.add( "internal", fieldable.stringValue() );  // may be a binary number
+      f.add( "internal", field.stringValue() );  // may be a binary number
 
-      byte[] arr = fieldable.getBinaryValue();
-      if (arr != null) {
-        f.add( "binary", Base64.byteArrayToBase64(arr, 0, arr.length));
+      BytesRef bytes = field.binaryValue();
+      if (bytes != null) {
+        f.add( "binary", Base64.byteArrayToBase64(bytes.bytes, bytes.offset, bytes.length));
       }
-      f.add( "boost", fieldable.getBoost() );
+      f.add( "boost", field.boost() );
       f.add( "docFreq", t.text()==null ? 0 : reader.docFreq( t ) ); // this can be 0 for non-indexed fields
             
       // If we have a term vector, return that
-      if( fieldable.isTermVectorStored() ) {
+      if( field.storeTermVectors() ) {
         try {
-          TermFreqVector v = reader.getTermFreqVector( docId, fieldable.name() );
+          TermFreqVector v = reader.getTermFreqVector( docId, field.name() );
           if( v != null ) {
             SimpleOrderedMap<Integer> tfv = new SimpleOrderedMap<Integer>();
             for( int i=0; i<v.size(); i++ ) {
@@ -280,7 +280,7 @@ public class LukeRequestHandler extends 
         }
       }
       
-      finfo.add( fieldable.name(), f );
+      finfo.add( field.name(), f );
     }
     return finfo;
   }
@@ -324,7 +324,7 @@ public class LukeRequestHandler extends 
           // Find a document with this field
           try {
             Document doc = searcher.doc( top.scoreDocs[0].doc );
-            Fieldable fld = doc.getFieldable( fieldName );
+            IndexableField fld = doc.getField( fieldName );
             if( fld != null ) {
               f.add( "index", getFieldFlags( fld ) );
             }

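The LukeRequestHandler hunk is largely a mechanical Fieldable -> IndexableField rename, but it shows which introspection accessors replace the old isXxx() methods. Below is a stripped-down flag dump that uses only the accessors appearing above; the class name, the single-letter codes, and the sample field are illustrative.

import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexableField;

// Sketch: summarizing an IndexableField's flags via the new accessor names.
public class FieldFlagsSketch {
  static String describe(IndexableField f) {
    StringBuilder flags = new StringBuilder();
    flags.append(f.indexed()             ? 'I' : '-');
    flags.append(f.tokenized()           ? 'T' : '-');
    flags.append(f.stored()              ? 'S' : '-');
    flags.append(f.storeTermVectors()    ? 'V' : '-');
    flags.append(f.omitNorms()           ? 'O' : '-');
    flags.append(f.binaryValue() != null ? 'B' : '-');
    return flags.toString();
  }

  public static void main(String[] args) {
    // Field subclasses implement IndexableField, so they can be inspected
    // directly; the exact output depends on the field type's defaults.
    System.out.println(describe(new StringField("id", "42")));
  }
}
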
Modified: lucene/dev/trunk/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java?rev=1162347&r1=1162346&r2=1162347&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java (original)
+++ lucene/dev/trunk/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java Sat Aug 27 13:27:01 2011
@@ -17,7 +17,12 @@
 
 package org.apache.solr.handler.component;
 
+import java.io.IOException;
+import java.net.URL;
+import java.util.*;
+
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.index.Term;
@@ -47,10 +52,6 @@ import org.apache.solr.schema.SchemaFiel
 import org.apache.solr.search.*;
 import org.apache.solr.util.SolrPluginUtils;
 
-import java.io.IOException;
-import java.net.URL;
-import java.util.*;
-
 /**
  * TODO!
  * 
@@ -426,7 +427,7 @@ public class QueryComponent extends Sear
       Sort sort = searcher.weightSort(rb.getSortSpec().getSort());
       SortField[] sortFields = sort==null ? new SortField[]{SortField.FIELD_SCORE} : sort.getSort();
       NamedList sortVals = new NamedList(); // order is important for the sort fields
-      Field field = new Field("dummy", "", Field.Store.YES, Field.Index.NO); // a dummy Field
+      Field field = new StringField("dummy", ""); // a dummy Field
       ReaderContext topReaderContext = searcher.getTopReaderContext();
       AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
       AtomicReaderContext currentLeaf = null;