You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by rm...@apache.org on 2011/08/16 00:03:52 UTC
svn commit: r1158029 [12/15] - in /lucene/dev/branches/fieldtype_conflicted:
lucene/ lucene/contrib/ lucene/contrib/demo/src/java/org/apache/lucene/demo/
lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/
lucene/contrib/highlighter...
Modified: lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java Mon Aug 15 22:03:41 2011
@@ -25,7 +25,9 @@ import org.apache.lucene.analysis.MockAn
import org.apache.lucene.analysis.MockTokenFilter;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
@@ -90,10 +92,12 @@ public class TestSpansAdvanced extends L
final String text) throws IOException {
final Document document = new Document();
- document.add(newField(FIELD_ID, id, Field.Store.YES,
- Field.Index.NOT_ANALYZED));
- document.add(newField(FIELD_TEXT, text, Field.Store.YES,
- Field.Index.ANALYZED));
+ FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+ customType.setStored(true);
+ FieldType customType2 = new FieldType(StringField.TYPE_UNSTORED);
+ customType2.setStored(true);
+ document.add(newField(FIELD_ID, id, customType2));
+ document.add(newField(FIELD_TEXT, text, customType));
writer.addDocument(document);
}
Modified: lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/store/TestBufferedIndexInput.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/store/TestBufferedIndexInput.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/store/TestBufferedIndexInput.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/store/TestBufferedIndexInput.java Mon Aug 15 22:03:41 2011
@@ -28,7 +28,7 @@ import java.util.Random;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
@@ -251,8 +251,8 @@ public class TestBufferedIndexInput exte
);
for(int i=0;i<37;i++) {
Document doc = new Document();
- doc.add(newField("content", "aaa bbb ccc ddd" + i, Field.Store.YES, Field.Index.ANALYZED));
- doc.add(newField("id", "" + i, Field.Store.YES, Field.Index.ANALYZED));
+ doc.add(newField("content", "aaa bbb ccc ddd" + i, TextField.TYPE_STORED));
+ doc.add(newField("id", "" + i, TextField.TYPE_STORED));
writer.addDocument(doc);
}
writer.close();
Modified: lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/store/TestLockFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/store/TestLockFactory.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/store/TestLockFactory.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/store/TestLockFactory.java Mon Aug 15 22:03:41 2011
@@ -25,7 +25,7 @@ import java.util.Map;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
@@ -415,7 +415,7 @@ public class TestLockFactory extends Luc
private void addDoc(IndexWriter writer) throws IOException {
Document doc = new Document();
- doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
+ doc.add(newField("content", "aaa", TextField.TYPE_UNSTORED));
writer.addDocument(doc);
}
}
Modified: lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/store/TestMultiMMap.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/store/TestMultiMMap.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/store/TestMultiMMap.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/store/TestMultiMMap.java Mon Aug 15 22:03:41 2011
@@ -23,6 +23,7 @@ import java.util.Random;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.util.BytesRef;
@@ -123,8 +124,8 @@ public class TestMultiMMap extends Lucen
MockDirectoryWrapper dir = new MockDirectoryWrapper(random, mmapDir);
RandomIndexWriter writer = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
Document doc = new Document();
- Field docid = newField("docid", "0", Field.Store.YES, Field.Index.NOT_ANALYZED);
- Field junk = newField("junk", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
+ Field docid = newField("docid", "0", StringField.TYPE_STORED);
+ Field junk = newField("junk", "", StringField.TYPE_STORED);
doc.add(docid);
doc.add(junk);
Modified: lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/store/TestRAMDirectory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/store/TestRAMDirectory.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/store/TestRAMDirectory.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/store/TestRAMDirectory.java Mon Aug 15 22:03:41 2011
@@ -28,6 +28,7 @@ import org.apache.lucene.util._TestUtil;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
@@ -59,7 +60,7 @@ public class TestRAMDirectory extends Lu
Document doc = null;
for (int i = 0; i < docsToAdd; i++) {
doc = new Document();
- doc.add(newField("content", English.intToEnglish(i).trim(), Field.Store.YES, Field.Index.NOT_ANALYZED));
+ doc.add(newField("content", English.intToEnglish(i).trim(), StringField.TYPE_STORED));
writer.addDocument(doc);
}
assertEquals(docsToAdd, writer.maxDoc());
@@ -119,7 +120,7 @@ public class TestRAMDirectory extends Lu
public void run() {
for (int j=1; j<docsPerThread; j++) {
Document doc = new Document();
- doc.add(newField("sizeContent", English.intToEnglish(num*docsPerThread+j).trim(), Field.Store.YES, Field.Index.NOT_ANALYZED));
+ doc.add(newField("sizeContent", English.intToEnglish(num*docsPerThread+j).trim(), StringField.TYPE_STORED));
try {
writer.addDocument(doc);
} catch (IOException e) {
Modified: lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/store/TestWindowsMMap.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/store/TestWindowsMMap.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/store/TestWindowsMMap.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/store/TestWindowsMMap.java Mon Aug 15 22:03:41 2011
@@ -24,7 +24,7 @@ import org.apache.lucene.util._TestUtil;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
@@ -84,7 +84,7 @@ public class TestWindowsMMap extends Luc
for(int dx = 0; dx < num; dx ++) {
String f = randomField();
Document doc = new Document();
- doc.add(newField("data", f, Field.Store.YES, Field.Index.ANALYZED));
+ doc.add(newField("data", f, TextField.TYPE_STORED));
writer.addDocument(doc);
}
Modified: lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/lucene/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java Mon Aug 15 22:03:41 2011
@@ -18,7 +18,7 @@ package org.apache.lucene.util;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiReader;
@@ -54,12 +54,12 @@ public class TestFieldCacheSanityChecker
float theFloat = Float.MAX_VALUE;
for (int i = 0; i < NUM_DOCS; i++){
Document doc = new Document();
- doc.add(newField("theLong", String.valueOf(theLong--), Field.Store.NO, Field.Index.NOT_ANALYZED));
- doc.add(newField("theDouble", String.valueOf(theDouble--), Field.Store.NO, Field.Index.NOT_ANALYZED));
- doc.add(newField("theByte", String.valueOf(theByte--), Field.Store.NO, Field.Index.NOT_ANALYZED));
- doc.add(newField("theShort", String.valueOf(theShort--), Field.Store.NO, Field.Index.NOT_ANALYZED));
- doc.add(newField("theInt", String.valueOf(theInt--), Field.Store.NO, Field.Index.NOT_ANALYZED));
- doc.add(newField("theFloat", String.valueOf(theFloat--), Field.Store.NO, Field.Index.NOT_ANALYZED));
+ doc.add(newField("theLong", String.valueOf(theLong--), StringField.TYPE_UNSTORED));
+ doc.add(newField("theDouble", String.valueOf(theDouble--), StringField.TYPE_UNSTORED));
+ doc.add(newField("theByte", String.valueOf(theByte--), StringField.TYPE_UNSTORED));
+ doc.add(newField("theShort", String.valueOf(theShort--), StringField.TYPE_UNSTORED));
+ doc.add(newField("theInt", String.valueOf(theInt--), StringField.TYPE_UNSTORED));
+ doc.add(newField("theFloat", String.valueOf(theFloat--), StringField.TYPE_UNSTORED));
if (0 == i % 3) {
wA.addDocument(doc);
} else {
Modified: lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LimitTokenCountAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LimitTokenCountAnalyzer.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LimitTokenCountAnalyzer.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LimitTokenCountAnalyzer.java Mon Aug 15 22:03:41 2011
@@ -17,9 +17,9 @@ package org.apache.lucene.analysis.misce
* limitations under the License.
*/
-import org.apache.lucene.document.Fieldable;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.index.IndexableField;
import java.io.Reader;
import java.io.IOException;
@@ -60,7 +60,7 @@ public final class LimitTokenCountAnalyz
}
@Override
- public int getOffsetGap(Fieldable field) {
+ public int getOffsetGap(IndexableField field) {
return delegate.getOffsetGap(field);
}
Modified: lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PerFieldAnalyzerWrapper.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PerFieldAnalyzerWrapper.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PerFieldAnalyzerWrapper.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PerFieldAnalyzerWrapper.java Mon Aug 15 22:03:41 2011
@@ -19,7 +19,7 @@ package org.apache.lucene.analysis.misce
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.index.IndexableField;
import java.io.Reader;
import java.io.IOException;
@@ -119,10 +119,11 @@ public final class PerFieldAnalyzerWrapp
/** Return the offsetGap from the analyzer assigned to field */
@Override
- public int getOffsetGap(Fieldable field) {
+ public int getOffsetGap(IndexableField field) {
Analyzer analyzer = analyzerMap.get(field.name());
- if (analyzer == null)
+ if (analyzer == null) {
analyzer = defaultAnalyzer;
+ }
return analyzer.getOffsetGap(field);
}
Modified: lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestClassicAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestClassicAnalyzer.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestClassicAnalyzer.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestClassicAnalyzer.java Mon Aug 15 22:03:41 2011
@@ -4,7 +4,7 @@ import org.apache.lucene.analysis.Analyz
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.standard.ClassicAnalyzer;
import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.IndexReader;
@@ -261,12 +261,12 @@ public class TestClassicAnalyzer extends
// This produces a too-long term:
String contents = "abc xyz x" + bigTerm + " another term";
- doc.add(new Field("content", contents, Field.Store.NO, Field.Index.ANALYZED));
+ doc.add(new TextField("content", contents));
writer.addDocument(doc);
// Make sure we can add another normal document
doc = new Document();
- doc.add(new Field("content", "abc bbb ccc", Field.Store.NO, Field.Index.ANALYZED));
+ doc.add(new TextField("content", "abc bbb ccc"));
writer.addDocument(doc);
writer.close();
@@ -297,7 +297,7 @@ public class TestClassicAnalyzer extends
// Make sure we can add a document with exactly the
// maximum length term, and search on that term:
doc = new Document();
- doc.add(new Field("content", bigTerm, Field.Store.NO, Field.Index.ANALYZED));
+ doc.add(new TextField("content", bigTerm));
ClassicAnalyzer sa = new ClassicAnalyzer(TEST_VERSION_CURRENT);
sa.setMaxTokenLength(100000);
writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, sa));
Modified: lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java Mon Aug 15 22:03:41 2011
@@ -24,6 +24,8 @@ import org.apache.lucene.analysis.TokenS
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
@@ -46,8 +48,8 @@ public class TestKeywordAnalyzer extends
TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT)));
Document doc = new Document();
- doc.add(new Field("partnum", "Q36", Field.Store.YES, Field.Index.NOT_ANALYZED));
- doc.add(new Field("description", "Illidium Space Modulator", Field.Store.YES, Field.Index.ANALYZED));
+ doc.add(new Field("partnum", StringField.TYPE_STORED, "Q36"));
+ doc.add(new Field("description", TextField.TYPE_STORED, "Illidium Space Modulator"));
writer.addDocument(doc);
writer.close();
@@ -74,10 +76,10 @@ public class TestKeywordAnalyzer extends
RAMDirectory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new KeywordAnalyzer()));
Document doc = new Document();
- doc.add(new Field("partnum", "Q36", Field.Store.YES, Field.Index.ANALYZED));
+ doc.add(new Field("partnum", TextField.TYPE_STORED, "Q36"));
writer.addDocument(doc);
doc = new Document();
- doc.add(new Field("partnum", "Q37", Field.Store.YES, Field.Index.ANALYZED));
+ doc.add(new Field("partnum", TextField.TYPE_STORED, "Q37"));
writer.addDocument(doc);
writer.close();
Modified: lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java Mon Aug 15 22:03:41 2011
@@ -26,7 +26,7 @@ import org.apache.lucene.analysis.MockAn
import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
@@ -58,7 +58,7 @@ public class TestLimitTokenCountAnalyzer
for(int i=0;i<10000;i++)
b.append(" a");
b.append(" x");
- doc.add(newField("field", b.toString(), Field.Store.NO, Field.Index.ANALYZED));
+ doc.add(newField("field", b.toString(), TextField.TYPE_UNSTORED));
writer.addDocument(doc);
writer.close();
Modified: lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java Mon Aug 15 22:03:41 2011
@@ -26,6 +26,7 @@ import org.apache.lucene.analysis.MockTo
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
@@ -51,8 +52,8 @@ public class QueryAutoStopWordAnalyzerTe
Document doc = new Document();
String variedFieldValue = variedFieldValues[i % variedFieldValues.length];
String repetitiveFieldValue = repetitiveFieldValues[i % repetitiveFieldValues.length];
- doc.add(new Field("variedField", variedFieldValue, Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field("repetitiveField", repetitiveFieldValue, Field.Store.YES, Field.Index.ANALYZED));
+ doc.add(new Field("variedField", TextField.TYPE_STORED, variedFieldValue));
+ doc.add(new Field("repetitiveField", TextField.TYPE_STORED, repetitiveFieldValue));
writer.addDocument(doc);
}
writer.close();
Modified: lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java Mon Aug 15 22:03:41 2011
@@ -29,6 +29,7 @@ import org.apache.lucene.analysis.tokena
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
@@ -56,18 +57,15 @@ public class ShingleAnalyzerWrapperTest
Document doc;
doc = new Document();
- doc.add(new Field("content", "please divide this sentence into shingles",
- Field.Store.YES,Field.Index.ANALYZED));
+ doc.add(new Field("content", TextField.TYPE_STORED, "please divide this sentence into shingles"));
writer.addDocument(doc);
doc = new Document();
- doc.add(new Field("content", "just another test sentence",
- Field.Store.YES,Field.Index.ANALYZED));
+ doc.add(new Field("content", TextField.TYPE_STORED, "just another test sentence"));
writer.addDocument(doc);
doc = new Document();
- doc.add(new Field("content", "a sentence which contains no test",
- Field.Store.YES,Field.Index.ANALYZED));
+ doc.add(new Field("content", TextField.TYPE_STORED, "a sentence which contains no test"));
writer.addDocument(doc);
writer.close();
Modified: lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java Mon Aug 15 22:03:41 2011
@@ -29,6 +29,8 @@ import org.apache.lucene.analysis.tokena
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.TermPositionVector;
@@ -89,8 +91,12 @@ public class TestTeeSinkTokenFilter exte
Document doc = new Document();
TeeSinkTokenFilter tee = new TeeSinkTokenFilter(analyzer.tokenStream("field", new StringReader("abcd ")));
TokenStream sink = tee.newSinkTokenStream();
- Field f1 = new Field("field", tee, Field.TermVector.WITH_POSITIONS_OFFSETS);
- Field f2 = new Field("field", sink, Field.TermVector.WITH_POSITIONS_OFFSETS);
+ FieldType ft = new FieldType(TextField.TYPE_UNSTORED);
+ ft.setStoreTermVectors(true);
+ ft.setStoreTermVectorOffsets(true);
+ ft.setStoreTermVectorPositions(true);
+ Field f1 = new Field("field", ft, tee);
+ Field f2 = new Field("field", ft, sink);
doc.add(f1);
doc.add(f2);
w.addDocument(doc);
Modified: lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/test/org/apache/lucene/collation/CollationTestBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/test/org/apache/lucene/collation/CollationTestBase.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/test/org/apache/lucene/collation/CollationTestBase.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/modules/analysis/common/src/test/org/apache/lucene/collation/CollationTestBase.java Mon Aug 15 22:03:41 2011
@@ -26,6 +26,7 @@ import org.apache.lucene.analysis.tokena
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.ScoreDoc;
@@ -36,8 +37,11 @@ import org.apache.lucene.search.TermRang
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
-import org.apache.lucene.document.Field;
import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.TextField;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IndexableBinaryStringTools;
import org.apache.lucene.util.LuceneTestCase;
@@ -81,10 +85,8 @@ public abstract class CollationTestBase
IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(
TEST_VERSION_CURRENT, analyzer));
Document doc = new Document();
- doc.add(new Field("content", "\u0633\u0627\u0628",
- Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field("body", "body",
- Field.Store.YES, Field.Index.NOT_ANALYZED));
+ doc.add(new Field("content", TextField.TYPE_STORED, "\u0633\u0627\u0628"));
+ doc.add(new Field("body", StringField.TYPE_STORED, "body"));
writer.addDocument(doc);
writer.close();
IndexSearcher searcher = new IndexSearcher(ramDir, true);
@@ -118,8 +120,7 @@ public abstract class CollationTestBase
// orders the U+0698 character before the U+0633 character, so the single
// index Term below should NOT be returned by a TermRangeQuery with a Farsi
// Collator (or an Arabic one for the case when Farsi is not supported).
- doc.add(new Field("content", "\u0633\u0627\u0628",
- Field.Store.YES, Field.Index.ANALYZED));
+ doc.add(new Field("content", TextField.TYPE_STORED, "\u0633\u0627\u0628"));
writer.addDocument(doc);
writer.close();
IndexSearcher searcher = new IndexSearcher(ramDir, true);
@@ -141,10 +142,8 @@ public abstract class CollationTestBase
IndexWriter writer = new IndexWriter(farsiIndex, new IndexWriterConfig(
TEST_VERSION_CURRENT, analyzer));
Document doc = new Document();
- doc.add(new Field("content", "\u0633\u0627\u0628",
- Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field("body", "body",
- Field.Store.YES, Field.Index.NOT_ANALYZED));
+ doc.add(new Field("content", TextField.TYPE_STORED, "\u0633\u0627\u0628"));
+ doc.add(new Field("body", StringField.TYPE_STORED, "body"));
writer.addDocument(doc);
writer.close();
@@ -204,20 +203,21 @@ public abstract class CollationTestBase
{ "J", "y", "HOT", "HOT", "HOT", "HOT" },
};
+ FieldType customType = new FieldType();
+ customType.setStored(true);
+
for (int i = 0 ; i < sortData.length ; ++i) {
Document doc = new Document();
- doc.add(new Field("tracer", sortData[i][0],
- Field.Store.YES, Field.Index.NO));
- doc.add(new Field("contents", sortData[i][1],
- Field.Store.NO, Field.Index.ANALYZED));
+ doc.add(new Field("tracer", customType, sortData[i][0]));
+ doc.add(new TextField("contents", sortData[i][1]));
if (sortData[i][2] != null)
- doc.add(new Field("US", usAnalyzer.reusableTokenStream("US", new StringReader(sortData[i][2]))));
+ doc.add(new TextField("US", usAnalyzer.reusableTokenStream("US", new StringReader(sortData[i][2]))));
if (sortData[i][3] != null)
- doc.add(new Field("France", franceAnalyzer.reusableTokenStream("France", new StringReader(sortData[i][3]))));
+ doc.add(new TextField("France", franceAnalyzer.reusableTokenStream("France", new StringReader(sortData[i][3]))));
if (sortData[i][4] != null)
- doc.add(new Field("Sweden", swedenAnalyzer.reusableTokenStream("Sweden", new StringReader(sortData[i][4]))));
+ doc.add(new TextField("Sweden", swedenAnalyzer.reusableTokenStream("Sweden", new StringReader(sortData[i][4]))));
if (sortData[i][5] != null)
- doc.add(new Field("Denmark", denmarkAnalyzer.reusableTokenStream("Denmark", new StringReader(sortData[i][5]))));
+ doc.add(new TextField("Denmark", denmarkAnalyzer.reusableTokenStream("Denmark", new StringReader(sortData[i][5]))));
writer.addDocument(doc);
}
writer.optimize();
@@ -250,9 +250,9 @@ public abstract class CollationTestBase
int n = result.length;
for (int i = 0 ; i < n ; ++i) {
Document doc = searcher.doc(result[i].doc);
- String[] v = doc.getValues("tracer");
+ IndexableField[] v = doc.getFields("tracer");
for (int j = 0 ; j < v.length ; ++j) {
- buff.append(v[j]);
+ buff.append(v[j].stringValue());
}
}
assertEquals(expectedResult, buff.toString());
Modified: lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DocMaker.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DocMaker.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DocMaker.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DocMaker.java Mon Aug 15 22:03:41 2011
@@ -34,10 +34,10 @@ import org.apache.lucene.benchmark.byTas
import org.apache.lucene.benchmark.byTask.utils.Format;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.NumericField;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.TextField;
/**
* Creates {@link Document} objects. Uses a {@link ContentSource} to generate
@@ -94,7 +94,7 @@ public class DocMaker {
final Document doc;
DocData docData = new DocData();
- public DocState(boolean reuseFields, Store store, Store bodyStore, Index index, Index bodyIndex, TermVector termVector) {
+ public DocState(boolean reuseFields, FieldType ft, FieldType bodyFt) {
this.reuseFields = reuseFields;
@@ -103,11 +103,11 @@ public class DocMaker {
numericFields = new HashMap<String,NumericField>();
// Initialize the map with the default fields.
- fields.put(BODY_FIELD, new Field(BODY_FIELD, "", bodyStore, bodyIndex, termVector));
- fields.put(TITLE_FIELD, new Field(TITLE_FIELD, "", store, index, termVector));
- fields.put(DATE_FIELD, new Field(DATE_FIELD, "", store, index, termVector));
- fields.put(ID_FIELD, new Field(ID_FIELD, "", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
- fields.put(NAME_FIELD, new Field(NAME_FIELD, "", store, index, termVector));
+ fields.put(BODY_FIELD, new Field(BODY_FIELD, bodyFt, ""));
+ fields.put(TITLE_FIELD, new Field(TITLE_FIELD, ft, ""));
+ fields.put(DATE_FIELD, new Field(DATE_FIELD, ft, ""));
+ fields.put(ID_FIELD, new Field(ID_FIELD, StringField.TYPE_STORED, ""));
+ fields.put(NAME_FIELD, new Field(NAME_FIELD, ft, ""));
numericFields.put(DATE_MSEC_FIELD, new NumericField(DATE_MSEC_FIELD));
numericFields.put(TIME_SEC_FIELD, new NumericField(TIME_SEC_FIELD));
@@ -125,14 +125,14 @@ public class DocMaker {
* <code>reuseFields</code> was set to true, then it attempts to reuse a
* Field instance. If such a field does not exist, it creates a new one.
*/
- Field getField(String name, Store store, Index index, TermVector termVector) {
+ Field getField(String name, FieldType ft) {
if (!reuseFields) {
- return new Field(name, "", store, index, termVector);
+ return new Field(name, ft, "");
}
Field f = fields.get(name);
if (f == null) {
- f = new Field(name, "", store, index, termVector);
+ f = new Field(name, ft, "");
fields.put(name, f);
}
return f;
@@ -179,12 +179,9 @@ public class DocMaker {
protected Config config;
- protected Store storeVal = Store.NO;
- protected Store bodyStoreVal = Store.NO;
- protected Index indexVal = Index.ANALYZED_NO_NORMS;
- protected Index bodyIndexVal = Index.ANALYZED;
- protected TermVector termVecVal = TermVector.NO;
-
+ protected FieldType valType;
+ protected FieldType bodyValType;
+
protected ContentSource source;
protected boolean reuseFields;
protected boolean indexProperties;
@@ -196,6 +193,13 @@ public class DocMaker {
private int printNum = 0;
+ public DocMaker() {
+ valType = new FieldType(TextField.TYPE_UNSTORED);
+ valType.setOmitNorms(true);
+
+ bodyValType = new FieldType(TextField.TYPE_UNSTORED);
+ }
+
// create a doc
// use only part of the body, modify it to keep the rest (or use all if size==0).
// reset the docdata properties so they are not added more than once.
@@ -206,7 +210,10 @@ public class DocMaker {
doc.getFields().clear();
// Set ID_FIELD
- Field idField = ds.getField(ID_FIELD, storeVal, Index.NOT_ANALYZED_NO_NORMS, termVecVal);
+ FieldType ft = new FieldType(valType);
+ ft.setIndexed(true);
+
+ Field idField = ds.getField(ID_FIELD, ft);
int id;
if (r != null) {
id = r.nextInt(updateDocIDLimit);
@@ -223,7 +230,7 @@ public class DocMaker {
String name = docData.getName();
if (name == null) name = "";
name = cnt < 0 ? name : name + "_" + cnt;
- Field nameField = ds.getField(NAME_FIELD, storeVal, indexVal, termVecVal);
+ Field nameField = ds.getField(NAME_FIELD, valType);
nameField.setValue(name);
doc.add(nameField);
@@ -242,7 +249,7 @@ public class DocMaker {
} else {
dateString = "";
}
- Field dateStringField = ds.getField(DATE_FIELD, storeVal, indexVal, termVecVal);
+ Field dateStringField = ds.getField(DATE_FIELD, valType);
dateStringField.setValue(dateString);
doc.add(dateStringField);
@@ -264,7 +271,7 @@ public class DocMaker {
// Set TITLE_FIELD
String title = docData.getTitle();
- Field titleField = ds.getField(TITLE_FIELD, storeVal, indexVal, termVecVal);
+ Field titleField = ds.getField(TITLE_FIELD, valType);
titleField.setValue(title == null ? "" : title);
doc.add(titleField);
@@ -285,12 +292,12 @@ public class DocMaker {
bdy = body.substring(0, size); // use part
docData.setBody(body.substring(size)); // some left
}
- Field bodyField = ds.getField(BODY_FIELD, bodyStoreVal, bodyIndexVal, termVecVal);
+ Field bodyField = ds.getField(BODY_FIELD, bodyValType);
bodyField.setValue(bdy);
doc.add(bodyField);
if (storeBytes) {
- Field bytesField = ds.getField(BYTES_FIELD, Store.YES, Index.NOT_ANALYZED_NO_NORMS, TermVector.NO);
+ Field bytesField = ds.getField(BYTES_FIELD, StringField.TYPE_STORED);
bytesField.setValue(bdy.getBytes("UTF-8"));
doc.add(bytesField);
}
@@ -300,7 +307,7 @@ public class DocMaker {
Properties props = docData.getProps();
if (props != null) {
for (final Map.Entry<Object,Object> entry : props.entrySet()) {
- Field f = ds.getField((String) entry.getKey(), storeVal, indexVal, termVecVal);
+ Field f = ds.getField((String) entry.getKey(), valType);
f.setValue((String) entry.getValue());
doc.add(f);
}
@@ -319,7 +326,7 @@ public class DocMaker {
protected DocState getDocState() {
DocState ds = docState.get();
if (ds == null) {
- ds = new DocState(reuseFields, storeVal, bodyStoreVal, indexVal, bodyIndexVal, termVecVal);
+ ds = new DocState(reuseFields, valType, bodyValType);
docState.set(ds);
}
return ds;
@@ -455,33 +462,23 @@ public class DocMaker {
boolean norms = config.get("doc.tokenized.norms", false);
boolean bodyNorms = config.get("doc.body.tokenized.norms", true);
boolean termVec = config.get("doc.term.vector", false);
- storeVal = (stored ? Field.Store.YES : Field.Store.NO);
- bodyStoreVal = (bodyStored ? Field.Store.YES : Field.Store.NO);
- if (tokenized) {
- indexVal = norms ? Index.ANALYZED : Index.ANALYZED_NO_NORMS;
- } else {
- indexVal = norms ? Index.NOT_ANALYZED : Index.NOT_ANALYZED_NO_NORMS;
- }
-
- if (bodyTokenized) {
- bodyIndexVal = bodyNorms ? Index.ANALYZED : Index.ANALYZED_NO_NORMS;
- } else {
- bodyIndexVal = bodyNorms ? Index.NOT_ANALYZED : Index.NOT_ANALYZED_NO_NORMS;
- }
-
boolean termVecPositions = config.get("doc.term.vector.positions", false);
boolean termVecOffsets = config.get("doc.term.vector.offsets", false);
- if (termVecPositions && termVecOffsets) {
- termVecVal = TermVector.WITH_POSITIONS_OFFSETS;
- } else if (termVecPositions) {
- termVecVal = TermVector.WITH_POSITIONS;
- } else if (termVecOffsets) {
- termVecVal = TermVector.WITH_OFFSETS;
- } else if (termVec) {
- termVecVal = TermVector.YES;
- } else {
- termVecVal = TermVector.NO;
- }
+
+ valType.setStored(stored);
+ bodyValType.setStored(bodyStored);
+ valType.setTokenized(tokenized);
+ valType.setOmitNorms(!norms);
+ bodyValType.setTokenized(bodyTokenized);
+ bodyValType.setOmitNorms(!bodyNorms);
+
+ valType.setStoreTermVectors(termVec);
+ valType.setStoreTermVectorPositions(termVecPositions);
+ valType.setStoreTermVectorOffsets(termVecOffsets);
+ bodyValType.setStoreTermVectors(termVec);
+ bodyValType.setStoreTermVectorPositions(termVecPositions);
+ bodyValType.setStoreTermVectorOffsets(termVecOffsets);
+
storeBytes = config.get("doc.store.body.bytes", false);
reuseFields = config.get("doc.reuse.fields", true);
Modified: lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java Mon Aug 15 22:03:41 2011
@@ -28,8 +28,8 @@ import org.apache.lucene.analysis.Analyz
import org.apache.lucene.benchmark.byTask.PerfRunData;
import org.apache.lucene.benchmark.byTask.feeds.QueryMaker;
import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.TopDocs;
@@ -300,10 +300,10 @@ public abstract class ReadTask extends P
* @return A Collection of Field names (Strings)
*/
protected Collection<String> getFieldsToHighlight(Document document) {
- List<Fieldable> fieldables = document.getFields();
- Set<String> result = new HashSet<String>(fieldables.size());
- for (final Fieldable fieldable : fieldables) {
- result.add(fieldable.name());
+ List<IndexableField> fields = document.getFields();
+ Set<String> result = new HashSet<String>(fields.size());
+ for (final IndexableField f : fields) {
+ result.add(f.name());
}
return result;
}
Modified: lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTokensTask.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTokensTask.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTokensTask.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTokensTask.java Mon Aug 15 22:03:41 2011
@@ -26,8 +26,8 @@ import org.apache.lucene.analysis.tokena
import org.apache.lucene.benchmark.byTask.PerfRunData;
import org.apache.lucene.benchmark.byTask.feeds.DocMaker;
import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Fieldable;
import org.apache.lucene.document.NumericField;
+import org.apache.lucene.index.IndexableField;
/**
* Simple task to test performance of tokenizers. It just
@@ -65,11 +65,11 @@ public class ReadTokensTask extends Perf
@Override
public int doLogic() throws Exception {
- List<Fieldable> fields = doc.getFields();
+ List<IndexableField> fields = doc.getFields();
Analyzer analyzer = getRunData().getAnalyzer();
int tokenCount = 0;
- for(final Fieldable field : fields) {
- if (!field.isTokenized() || field instanceof NumericField) continue;
+ for(final IndexableField field : fields) {
+ if (!field.tokenized() || field instanceof NumericField) continue;
final TokenStream stream;
final TokenStream streamValue = field.tokenStreamValue();
Modified: lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetLoadFieldSelectorTask.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetLoadFieldSelectorTask.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetLoadFieldSelectorTask.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetLoadFieldSelectorTask.java Mon Aug 15 22:03:41 2011
@@ -16,20 +16,19 @@ package org.apache.lucene.benchmark.byTa
*/
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.StringTokenizer;
+
import org.apache.lucene.benchmark.byTask.PerfRunData;
-import org.apache.lucene.document.FieldSelector;
-import org.apache.lucene.document.SetBasedFieldSelector;
import org.apache.lucene.document.Document;
+import org.apache.lucene.index.DocumentStoredFieldVisitor;
import org.apache.lucene.index.IndexReader;
-import java.util.StringTokenizer;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.Collections;
-import java.io.IOException;
-
/**
- * Search and Traverse and Retrieve docs task using a SetBasedFieldSelector.
+ * Search and Traverse and Retrieve docs task using a
+ * FieldVisitor loading only the requested fields.
*
* <p>Note: This task reuses the reader if it is already open.
* Otherwise a reader is opened at start and closed at the end.
@@ -41,7 +41,8 @@ import java.io.IOException;
*/
public class SearchTravRetLoadFieldSelectorTask extends SearchTravTask {
- protected FieldSelector fieldSelector;
+ protected Set<String> fieldsToLoad;
+
public SearchTravRetLoadFieldSelectorTask(PerfRunData runData) {
super(runData);
@@ -55,18 +56,23 @@ public class SearchTravRetLoadFieldSelec
@Override
protected Document retrieveDoc(IndexReader ir, int id) throws IOException {
- return ir.document(id, fieldSelector);
+ if (fieldsToLoad == null) {
+ return ir.document(id);
+ } else {
+ DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(fieldsToLoad);
+ ir.document(id, visitor);
+ return visitor.getDocument();
+ }
}
@Override
public void setParams(String params) {
this.params = params; // cannot just call super.setParams(), b/c it's params differ.
- Set<String> fieldsToLoad = new HashSet<String>();
+ fieldsToLoad = new HashSet<String>();
for (StringTokenizer tokenizer = new StringTokenizer(params, ","); tokenizer.hasMoreTokens();) {
String s = tokenizer.nextToken();
fieldsToLoad.add(s);
}
- fieldSelector = new SetBasedFieldSelector(fieldsToLoad, Collections.<String> emptySet());
}
Modified: lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTask.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTask.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTask.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTask.java Mon Aug 15 22:03:41 2011
@@ -33,6 +33,7 @@ import org.apache.lucene.benchmark.byTas
import org.apache.lucene.benchmark.byTask.utils.StreamUtils;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexableField;
/**
* A task which writes documents, one line per document. Each line is in the
@@ -172,7 +173,7 @@ public class WriteLineDocTask extends Pe
boolean sufficient = !checkSufficientFields;
for (int i=0; i<fieldsToWrite.length; i++) {
- Field f = doc.getField(fieldsToWrite[i]);
+ IndexableField f = doc.getField(fieldsToWrite[i]);
String text = f == null ? "" : matcher.reset(f.stringValue()).replaceAll(" ").trim();
sb.append(text).append(SEP);
sufficient |= text.length()>0 && sufficientFields[i];
Modified: lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/DocNameExtractor.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/DocNameExtractor.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/DocNameExtractor.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/DocNameExtractor.java Mon Aug 15 22:03:41 2011
@@ -17,18 +17,20 @@
package org.apache.lucene.benchmark.quality.utils;
import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
-import org.apache.lucene.document.FieldSelector;
-import org.apache.lucene.document.FieldSelectorResult;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.StoredFieldVisitor;
import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.store.IndexInput;
/**
* Utility: extract doc names from an index
*/
public class DocNameExtractor {
- private FieldSelector fldSel;
- private String docNameField;
+ private final String docNameField;
/**
* Constructor for DocNameExtractor.
@@ -36,13 +38,6 @@ public class DocNameExtractor {
*/
public DocNameExtractor (final String docNameField) {
this.docNameField = docNameField;
- fldSel = new FieldSelector() {
- public FieldSelectorResult accept(String fieldName) {
- return fieldName.equals(docNameField) ?
- FieldSelectorResult.LOAD_AND_BREAK :
- FieldSelectorResult.NO_LOAD;
- }
- };
}
/**
@@ -53,7 +48,25 @@ public class DocNameExtractor {
* @throws IOException if cannot extract the doc name from the index.
*/
public String docName(IndexSearcher searcher, int docid) throws IOException {
- return searcher.doc(docid,fldSel).get(docNameField);
+ final List<String> name = new ArrayList<String>();
+ searcher.getIndexReader().document(docid, new StoredFieldVisitor() {
+ @Override
+ public boolean stringField(FieldInfo fieldInfo, IndexInput in, int numUTF8Bytes) throws IOException {
+ if (fieldInfo.name.equals(docNameField) && name.size() == 0) {
+ final byte[] b = new byte[numUTF8Bytes];
+ in.readBytes(b, 0, b.length);
+ name.add(new String(b, "UTF-8"));
+ } else {
+ in.seek(in.getFilePointer() + numUTF8Bytes);
+ }
+ return false;
+ }
+ });
+ if (name.size() != 0) {
+ return name.get(0);
+ } else {
+ return null;
+ }
}
}
Modified: lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/DocMakerTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/DocMakerTest.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/DocMakerTest.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/DocMakerTest.java Mon Aug 15 22:03:41 2011
@@ -137,28 +137,28 @@ public class DocMakerTest extends Benchm
// Don't set anything, use the defaults
doc = createTestNormsDocument(false, false, false, false);
- assertTrue(doc.getField(DocMaker.TITLE_FIELD).getOmitNorms());
- assertFalse(doc.getField(DocMaker.BODY_FIELD).getOmitNorms());
+ assertTrue(doc.getField(DocMaker.TITLE_FIELD).omitNorms());
+ assertFalse(doc.getField(DocMaker.BODY_FIELD).omitNorms());
// Set norms to false
doc = createTestNormsDocument(true, false, false, false);
- assertTrue(doc.getField(DocMaker.TITLE_FIELD).getOmitNorms());
- assertFalse(doc.getField(DocMaker.BODY_FIELD).getOmitNorms());
+ assertTrue(doc.getField(DocMaker.TITLE_FIELD).omitNorms());
+ assertFalse(doc.getField(DocMaker.BODY_FIELD).omitNorms());
// Set norms to true
doc = createTestNormsDocument(true, true, false, false);
- assertFalse(doc.getField(DocMaker.TITLE_FIELD).getOmitNorms());
- assertFalse(doc.getField(DocMaker.BODY_FIELD).getOmitNorms());
+ assertFalse(doc.getField(DocMaker.TITLE_FIELD).omitNorms());
+ assertFalse(doc.getField(DocMaker.BODY_FIELD).omitNorms());
// Set body norms to false
doc = createTestNormsDocument(false, false, true, false);
- assertTrue(doc.getField(DocMaker.TITLE_FIELD).getOmitNorms());
- assertTrue(doc.getField(DocMaker.BODY_FIELD).getOmitNorms());
+ assertTrue(doc.getField(DocMaker.TITLE_FIELD).omitNorms());
+ assertTrue(doc.getField(DocMaker.BODY_FIELD).omitNorms());
// Set body norms to true
doc = createTestNormsDocument(false, false, true, true);
- assertTrue(doc.getField(DocMaker.TITLE_FIELD).getOmitNorms());
- assertFalse(doc.getField(DocMaker.BODY_FIELD).getOmitNorms());
+ assertTrue(doc.getField(DocMaker.TITLE_FIELD).omitNorms());
+ assertFalse(doc.getField(DocMaker.BODY_FIELD).omitNorms());
}
}
Modified: lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTaskTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTaskTest.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTaskTest.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTaskTest.java Mon Aug 15 22:03:41 2011
@@ -34,8 +34,7 @@ import org.apache.lucene.benchmark.byTas
import org.apache.lucene.benchmark.byTask.utils.StreamUtils.Type;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.StringField;
/** Tests the functionality of {@link WriteLineDocTask}. */
public class WriteLineDocTaskTest extends BenchmarkTestCase {
@@ -46,9 +45,9 @@ public class WriteLineDocTaskTest extend
@Override
public Document makeDocument() throws Exception {
Document doc = new Document();
- doc.add(new Field(BODY_FIELD, "body", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
- doc.add(new Field(TITLE_FIELD, "title", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
- doc.add(new Field(DATE_FIELD, "date", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
+ doc.add(new StringField(BODY_FIELD, "body"));
+ doc.add(new StringField(TITLE_FIELD, "title"));
+ doc.add(new StringField(DATE_FIELD, "date"));
return doc;
}
@@ -60,9 +59,9 @@ public class WriteLineDocTaskTest extend
@Override
public Document makeDocument() throws Exception {
Document doc = new Document();
- doc.add(new Field(BODY_FIELD, "body\r\ntext\ttwo", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
- doc.add(new Field(TITLE_FIELD, "title\r\ntext", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
- doc.add(new Field(DATE_FIELD, "date\r\ntext", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
+ doc.add(new StringField(BODY_FIELD, "body\r\ntext\ttwo"));
+ doc.add(new StringField(TITLE_FIELD, "title\r\ntext"));
+ doc.add(new StringField(DATE_FIELD, "date\r\ntext"));
return doc;
}
@@ -73,8 +72,8 @@ public class WriteLineDocTaskTest extend
@Override
public Document makeDocument() throws Exception {
Document doc = new Document();
- doc.add(new Field(TITLE_FIELD, "title", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
- doc.add(new Field(DATE_FIELD, "date", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
+ doc.add(new StringField(TITLE_FIELD, "title"));
+ doc.add(new StringField(DATE_FIELD, "date"));
return doc;
}
}
@@ -84,8 +83,8 @@ public class WriteLineDocTaskTest extend
@Override
public Document makeDocument() throws Exception {
Document doc = new Document();
- doc.add(new Field(BODY_FIELD, "body", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
- doc.add(new Field(DATE_FIELD, "date", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
+ doc.add(new StringField(BODY_FIELD, "body"));
+ doc.add(new StringField(DATE_FIELD, "date"));
return doc;
}
}
@@ -95,7 +94,7 @@ public class WriteLineDocTaskTest extend
@Override
public Document makeDocument() throws Exception {
Document doc = new Document();
- doc.add(new Field(DATE_FIELD, "date", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
+ doc.add(new StringField(DATE_FIELD, "date"));
return doc;
}
}
@@ -106,7 +105,7 @@ public class WriteLineDocTaskTest extend
@Override
public Document makeDocument() throws Exception {
Document doc = new Document();
- doc.add(new Field(DATE_FIELD, "date", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
+ doc.add(new StringField(DATE_FIELD, "date"));
return doc;
}
}
@@ -126,9 +125,9 @@ public class WriteLineDocTaskTest extend
public Document makeDocument() throws Exception {
Document doc = new Document();
String name = Thread.currentThread().getName();
- doc.add(new Field(BODY_FIELD, "body_" + name, Store.NO, Index.NOT_ANALYZED_NO_NORMS));
- doc.add(new Field(TITLE_FIELD, "title_" + name, Store.NO, Index.NOT_ANALYZED_NO_NORMS));
- doc.add(new Field(DATE_FIELD, "date_" + name, Store.NO, Index.NOT_ANALYZED_NO_NORMS));
+ doc.add(new StringField(BODY_FIELD, "body_" + name));
+ doc.add(new StringField(TITLE_FIELD, "title_" + name));
+ doc.add(new StringField(DATE_FIELD, "date_" + name));
return doc;
}
Modified: lucene/dev/branches/fieldtype_conflicted/modules/grouping/src/test/org/apache/lucene/search/grouping/TermAllGroupsCollectorTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/modules/grouping/src/test/org/apache/lucene/search/grouping/TermAllGroupsCollectorTest.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/modules/grouping/src/test/org/apache/lucene/search/grouping/TermAllGroupsCollectorTest.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/modules/grouping/src/test/org/apache/lucene/search/grouping/TermAllGroupsCollectorTest.java Mon Aug 15 22:03:41 2011
@@ -5,7 +5,7 @@ package org.apache.lucene.search.groupin
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
@@ -20,6 +20,8 @@ package org.apache.lucene.search.groupin
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.TextField;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
@@ -32,6 +34,8 @@ public class TermAllGroupsCollectorTest
public void testTotalGroupCount() throws Exception {
final String groupField = "author";
+ FieldType customType = new FieldType();
+ customType.setStored(true);
Directory dir = newDirectory();
RandomIndexWriter w = new RandomIndexWriter(
@@ -41,51 +45,51 @@ public class TermAllGroupsCollectorTest
new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
// 0
Document doc = new Document();
- doc.add(new Field(groupField, "author1", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field("content", "random text", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field("id", "1", Field.Store.YES, Field.Index.NO));
+ doc.add(new Field(groupField, TextField.TYPE_STORED, "author1"));
+ doc.add(new Field("content", TextField.TYPE_STORED, "random text"));
+ doc.add(new Field("id", customType, "1"));
w.addDocument(doc);
// 1
doc = new Document();
- doc.add(new Field(groupField, "author1", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field("content", "some more random text blob", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field("id", "2", Field.Store.YES, Field.Index.NO));
+ doc.add(new Field(groupField, TextField.TYPE_STORED, "author1"));
+ doc.add(new Field("content", TextField.TYPE_STORED, "some more random text blob"));
+ doc.add(new Field("id", customType, "2"));
w.addDocument(doc);
// 2
doc = new Document();
- doc.add(new Field(groupField, "author1", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field("content", "some more random textual data", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field("id", "3", Field.Store.YES, Field.Index.NO));
+ doc.add(new Field(groupField, TextField.TYPE_STORED, "author1"));
+ doc.add(new Field("content", TextField.TYPE_STORED, "some more random textual data"));
+ doc.add(new Field("id", customType, "3"));
w.addDocument(doc);
w.commit(); // To ensure a second segment
// 3
doc = new Document();
- doc.add(new Field(groupField, "author2", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field("content", "some random text", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field("id", "4", Field.Store.YES, Field.Index.NO));
+ doc.add(new Field(groupField, TextField.TYPE_STORED, "author2"));
+ doc.add(new Field("content", TextField.TYPE_STORED, "some random text"));
+ doc.add(new Field("id", customType, "4"));
w.addDocument(doc);
// 4
doc = new Document();
- doc.add(new Field(groupField, "author3", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field("content", "some more random text", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field("id", "5", Field.Store.YES, Field.Index.NO));
+ doc.add(new Field(groupField, TextField.TYPE_STORED, "author3"));
+ doc.add(new Field("content", TextField.TYPE_STORED, "some more random text"));
+ doc.add(new Field("id", customType, "5"));
w.addDocument(doc);
// 5
doc = new Document();
- doc.add(new Field(groupField, "author3", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field("content", "random blob", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field("id", "6", Field.Store.YES, Field.Index.NO));
+ doc.add(new Field(groupField, TextField.TYPE_STORED, "author3"));
+ doc.add(new Field("content", TextField.TYPE_STORED, "random blob"));
+ doc.add(new Field("id", customType, "6"));
w.addDocument(doc);
// 6 -- no author field
doc = new Document();
- doc.add(new Field("content", "random word stuck in alot of other text", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field("id", "6", Field.Store.YES, Field.Index.NO));
+ doc.add(new Field("content", TextField.TYPE_STORED, "random word stuck in alot of other text"));
+ doc.add(new Field("id", customType, "6"));
w.addDocument(doc);
IndexSearcher indexSearcher = new IndexSearcher(w.getReader());
Modified: lucene/dev/branches/fieldtype_conflicted/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java Mon Aug 15 22:03:41 2011
@@ -23,8 +23,14 @@ import java.util.*;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.NumericField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.TextField;
import org.apache.lucene.index.FieldInfo.IndexOptions;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
@@ -47,6 +53,9 @@ public class TestGrouping extends Lucene
final String groupField = "author";
+ FieldType customType = new FieldType();
+ customType.setStored(true);
+
Directory dir = newDirectory();
RandomIndexWriter w = new RandomIndexWriter(
random,
@@ -55,50 +64,50 @@ public class TestGrouping extends Lucene
new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
// 0
Document doc = new Document();
- doc.add(new Field(groupField, "author1", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field("content", "random text", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field("id", "1", Field.Store.YES, Field.Index.NO));
+ doc.add(new Field(groupField, TextField.TYPE_STORED, "author1"));
+ doc.add(new Field("content", TextField.TYPE_STORED, "random text"));
+ doc.add(new Field("id", customType, "1"));
w.addDocument(doc);
// 1
doc = new Document();
- doc.add(new Field(groupField, "author1", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field("content", "some more random text", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field("id", "2", Field.Store.YES, Field.Index.NO));
+ doc.add(new Field(groupField, TextField.TYPE_STORED, "author1"));
+ doc.add(new Field("content", TextField.TYPE_STORED, "some more random text"));
+ doc.add(new Field("id", customType, "2"));
w.addDocument(doc);
// 2
doc = new Document();
- doc.add(new Field(groupField, "author1", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field("content", "some more random textual data", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field("id", "3", Field.Store.YES, Field.Index.NO));
+ doc.add(new Field(groupField, TextField.TYPE_STORED, "author1"));
+ doc.add(new Field("content", TextField.TYPE_STORED, "some more random textual data"));
+ doc.add(new Field("id", customType, "3"));
w.addDocument(doc);
// 3
doc = new Document();
- doc.add(new Field(groupField, "author2", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field("content", "some random text", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field("id", "4", Field.Store.YES, Field.Index.NO));
+ doc.add(new Field(groupField, TextField.TYPE_STORED, "author2"));
+ doc.add(new Field("content", TextField.TYPE_STORED, "some random text"));
+ doc.add(new Field("id", customType, "4"));
w.addDocument(doc);
// 4
doc = new Document();
- doc.add(new Field(groupField, "author3", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field("content", "some more random text", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field("id", "5", Field.Store.YES, Field.Index.NO));
+ doc.add(new Field(groupField, TextField.TYPE_STORED, "author3"));
+ doc.add(new Field("content", TextField.TYPE_STORED, "some more random text"));
+ doc.add(new Field("id", customType, "5"));
w.addDocument(doc);
// 5
doc = new Document();
- doc.add(new Field(groupField, "author3", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field("content", "random", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field("id", "6", Field.Store.YES, Field.Index.NO));
+ doc.add(new Field(groupField, TextField.TYPE_STORED, "author3"));
+ doc.add(new Field("content", TextField.TYPE_STORED, "random"));
+ doc.add(new Field("id", customType, "6"));
w.addDocument(doc);
// 6 -- no author field
doc = new Document();
- doc.add(new Field("content", "random word stuck in alot of other text", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field("id", "6", Field.Store.YES, Field.Index.NO));
+ doc.add(new Field("content", TextField.TYPE_STORED, "random word stuck in alot of other text"));
+ doc.add(new Field("id", customType, "6"));
w.addDocument(doc);
IndexSearcher indexSearcher = new IndexSearcher(w.getReader());
@@ -386,18 +395,22 @@ public class TestGrouping extends Lucene
Document doc = new Document();
docs.add(doc);
if (groupValue.group != null) {
- doc.add(newField("group", groupValue.group.utf8ToString(), Field.Index.NOT_ANALYZED));
+ doc.add(newField("group", groupValue.group.utf8ToString(), StringField.TYPE_UNSTORED));
}
- doc.add(newField("sort1", groupValue.sort1.utf8ToString(), Field.Index.NOT_ANALYZED));
- doc.add(newField("sort2", groupValue.sort2.utf8ToString(), Field.Index.NOT_ANALYZED));
+ doc.add(newField("sort1", groupValue.sort1.utf8ToString(), StringField.TYPE_UNSTORED));
+ doc.add(newField("sort2", groupValue.sort2.utf8ToString(), StringField.TYPE_UNSTORED));
doc.add(new NumericField("id").setIntValue(groupValue.id));
- doc.add(newField("content", groupValue.content, Field.Index.ANALYZED));
+ doc.add(newField("content", groupValue.content, TextField.TYPE_UNSTORED));
//System.out.println("TEST: doc content=" + groupValue.content + " group=" + (groupValue.group == null ? "null" : groupValue.group.utf8ToString()) + " sort1=" + groupValue.sort1.utf8ToString() + " id=" + groupValue.id);
}
// So we can pull filter marking last doc in block:
-    final Field groupEnd = newField("groupend", "x", Field.Index.NOT_ANALYZED);
-    groupEnd.setIndexOptions(IndexOptions.DOCS_ONLY);
-    groupEnd.setOmitNorms(true);
+    FieldType groupEndType = new FieldType(StringField.TYPE_UNSTORED);
+    groupEndType.setIndexOptions(IndexOptions.DOCS_ONLY);
+    groupEndType.setOmitNorms(true);
+    final Field groupEnd = newField("groupend", "x", groupEndType);
docs.get(docs.size()-1).add(groupEnd);
// Add as a doc block:
w.addDocuments(docs);
@@ -497,15 +510,15 @@ public class TestGrouping extends Lucene
Document doc = new Document();
Document docNoGroup = new Document();
- Field group = newField("group", "", Field.Index.NOT_ANALYZED);
+ Field group = newField("group", "", StringField.TYPE_UNSTORED);
doc.add(group);
- Field sort1 = newField("sort1", "", Field.Index.NOT_ANALYZED);
+ Field sort1 = newField("sort1", "", StringField.TYPE_UNSTORED);
doc.add(sort1);
docNoGroup.add(sort1);
- Field sort2 = newField("sort2", "", Field.Index.NOT_ANALYZED);
+ Field sort2 = newField("sort2", "", StringField.TYPE_UNSTORED);
doc.add(sort2);
docNoGroup.add(sort2);
- Field content = newField("content", "", Field.Index.ANALYZED);
+ Field content = newField("content", "", TextField.TYPE_UNSTORED);
doc.add(content);
docNoGroup.add(content);
NumericField id = new NumericField("id");
Modified: lucene/dev/branches/fieldtype_conflicted/modules/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/modules/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/modules/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/modules/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java Mon Aug 15 22:03:41 2011
@@ -20,6 +20,7 @@ import org.apache.lucene.analysis.TokenS
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermFreqVector;
import org.apache.lucene.search.*;
@@ -822,8 +823,22 @@ public final class MoreLikeThis {
* </ol>
* This is a somewhat "advanced" routine, and in general only the 1st entry in the array is of interest.
* This method is exposed so that you can identify the "interesting words" in a document.
* For an easier method to call see {@link #retrieveInterestingTerms retrieveInterestingTerms()}.
*
* @param r the reader that has the content of the document
* @param fieldName field passed to the analyzer to use when analyzing the content
* @return the most interesting words in the document ordered by score, with the highest scoring, or best entry, first
Modified: lucene/dev/branches/fieldtype_conflicted/modules/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/modules/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/modules/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/modules/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java Mon Aug 15 22:03:41 2011
@@ -4,7 +4,13 @@ import org.apache.lucene.analysis.Analyz
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.queries.function.valuesource.ByteFieldSource;
@@ -128,23 +134,26 @@ public abstract class FunctionTestSetup
private static void addDoc(RandomIndexWriter iw, int i) throws Exception {
Document d = new Document();
- Fieldable f;
+ Field f;
int scoreAndID = i + 1;
- f = newField(ID_FIELD, id2String(scoreAndID), Field.Store.YES, Field.Index.NOT_ANALYZED); // for debug purposes
- f.setOmitNorms(true);
+ FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+ customType.setStored(true);
+ customType.setTokenized(false);
+ customType.setOmitNorms(true);
+
+ f = newField(ID_FIELD, id2String(scoreAndID), customType); // for debug purposes
d.add(f);
- f = newField(TEXT_FIELD, "text of doc" + scoreAndID + textLine(i), Field.Store.NO, Field.Index.ANALYZED); // for regular search
- f.setOmitNorms(true);
+ FieldType customType2 = new FieldType(TextField.TYPE_UNSTORED);
+ customType2.setOmitNorms(true);
+ f = newField(TEXT_FIELD, "text of doc" + scoreAndID + textLine(i), customType2); // for regular search
d.add(f);
- f = newField(INT_FIELD, "" + scoreAndID, Field.Store.NO, Field.Index.NOT_ANALYZED); // for function scoring
- f.setOmitNorms(true);
+ f = newField(INT_FIELD, "" + scoreAndID, customType); // for function scoring
d.add(f);
- f = newField(FLOAT_FIELD, scoreAndID + ".000", Field.Store.NO, Field.Index.NOT_ANALYZED); // for function scoring
- f.setOmitNorms(true);
+ f = newField(FLOAT_FIELD, scoreAndID + ".000", customType); // for function scoring
d.add(f);
iw.addDocument(d);
Modified: lucene/dev/branches/fieldtype_conflicted/modules/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/modules/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/modules/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/modules/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java Mon Aug 15 22:03:41 2011
@@ -27,6 +27,8 @@ import org.apache.lucene.analysis.MockAn
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.search.BooleanClause;
@@ -66,7 +68,9 @@ public class TestMoreLikeThis extends Lu
private void addDoc(RandomIndexWriter writer, String text) throws IOException {
Document doc = new Document();
- doc.add(newField("text", text, Field.Store.YES, Field.Index.ANALYZED));
+ FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+ customType.setStored(true);
+ doc.add(newField("text", text, customType));
writer.addDocument(doc);
}
Modified: lucene/dev/branches/fieldtype_conflicted/modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java Mon Aug 15 22:03:41 2011
@@ -26,7 +26,7 @@ import org.apache.lucene.analysis.Analyz
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.IndexSearcher;
@@ -284,7 +284,7 @@ public class TestMultiFieldQueryParser e
Directory ramDir = newDirectory();
IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
Document doc = new Document();
- doc.add(newField("body", "blah the footest blah", Field.Store.NO, Field.Index.ANALYZED));
+ doc.add(newField("body", "blah the footest blah", TextField.TYPE_UNSTORED));
iw.addDocument(doc);
iw.close();
Modified: lucene/dev/branches/fieldtype_conflicted/modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestQueryParser.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestQueryParser.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestQueryParser.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestQueryParser.java Mon Aug 15 22:03:41 2011
@@ -37,7 +37,7 @@ import org.apache.lucene.analysis.tokena
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.document.DateTools;
import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexReader;
@@ -1092,7 +1092,7 @@ public class TestQueryParser extends Luc
Analyzer a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, a));
Document doc = new Document();
- doc.add(newField("f", "the wizard of ozzy", Field.Store.NO, Field.Index.ANALYZED));
+ doc.add(newField("f", "the wizard of ozzy", TextField.TYPE_UNSTORED));
w.addDocument(doc);
IndexReader r = IndexReader.open(w, true);
w.close();
Modified: lucene/dev/branches/fieldtype_conflicted/modules/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/modules/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/modules/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/modules/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java Mon Aug 15 22:03:41 2011
@@ -22,7 +22,8 @@ import java.util.HashSet;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
@@ -113,12 +114,12 @@ public class TestComplexPhraseQuery exte
super.setUp();
rd = newDirectory();
IndexWriter w = new IndexWriter(rd, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
+ FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
+ customType.setStored(true);
for (int i = 0; i < docsContent.length; i++) {
Document doc = new Document();
- doc.add(newField("name", docsContent[i].name, Field.Store.YES,
- Field.Index.ANALYZED));
- doc.add(newField("id", docsContent[i].id, Field.Store.YES,
- Field.Index.ANALYZED));
+ doc.add(newField("name", docsContent[i].name, customType));
+ doc.add(newField("id", docsContent[i].id, customType));
w.addDocument(doc);
}
w.close();
Modified: lucene/dev/branches/fieldtype_conflicted/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/fieldtype_conflicted/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java?rev=1158029&r1=1158028&r2=1158029&view=diff
==============================================================================
--- lucene/dev/branches/fieldtype_conflicted/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java (original)
+++ lucene/dev/branches/fieldtype_conflicted/modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java Mon Aug 15 22:03:41 2011
@@ -25,7 +25,7 @@ import org.apache.lucene.analysis.Analyz
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler;
@@ -321,8 +321,7 @@ public class TestMultiFieldQPHelper exte
Directory ramDir = newDirectory();
IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
Document doc = new Document();
- doc.add(newField("body", "blah the footest blah", Field.Store.NO,
- Field.Index.ANALYZED));
+ doc.add(newField("body", "blah the footest blah", TextField.TYPE_UNSTORED));
iw.addDocument(doc);
iw.close();