You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by bu...@apache.org on 2011/02/21 19:50:40 UTC
svn commit: r1073110 [2/2] - in /lucene/dev/trunk/lucene: ./
src/java/org/apache/lucene/index/
src/java/org/apache/lucene/index/codecs/preflex/
src/test/org/apache/lucene/index/
Added: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java?rev=1073110&view=auto
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java (added)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java Mon Feb 21 18:50:39 2011
@@ -0,0 +1,222 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.junit.Test;
+
+// Verifies that Lucene assigns a consistent field-name -> field-number mapping
+// across segments, across writer sessions, through addIndexes(Directory), and
+// after an optimize() merge.
+// NOTE(review): this is an archived svn diff; code lines are preserved
+// byte-for-byte and only '+'-prefixed review comments are added.
+public class TestConsistentFieldNumbers extends LuceneTestCase {
+
+ @Test
+ public void testSameFieldNumbersAcrossSegments() throws Exception {
+ // Two passes: i == 0 commits between documents (single writer session);
+ // i == 1 closes and reopens the writer, so the field numbering must also
+ // be restored from the on-disk index when a new writer starts.
+ for (int i = 0; i < 2; i++) {
+ Directory dir = newDirectory();
+ // NoMergePolicy keeps the two flushed segments separate so each
+ // segment's FieldInfos can be inspected individually below.
+ IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(NoMergePolicy.COMPOUND_FILES));
+
+ Document d1 = new Document();
+ d1.add(new Field("f1", "first field", Store.YES, Index.ANALYZED, TermVector.NO));
+ d1.add(new Field("f2", "second field", Store.YES, Index.ANALYZED, TermVector.NO));
+ writer.addDocument(d1);
+
+ if (i == 1) {
+ writer.close();
+ writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(NoMergePolicy.COMPOUND_FILES));
+ } else {
+ writer.commit();
+ }
+
+ // d2 lists f2 before f1 on purpose and introduces two new fields; the
+ // assertions below check f1/f2 still get numbers 0/1 in segment 2.
+ Document d2 = new Document();
+ d2.add(new Field("f2", "second field", Store.YES, Index.ANALYZED, TermVector.NO));
+ d2.add(new Field("f1", "first field", Store.YES, Index.ANALYZED, TermVector.YES));
+ d2.add(new Field("f3", "third field", Store.YES, Index.ANALYZED, TermVector.NO));
+ d2.add(new Field("f4", "fourth field", Store.YES, Index.ANALYZED, TermVector.NO));
+ writer.addDocument(d2);
+
+ writer.close();
+
+ SegmentInfos sis = new SegmentInfos();
+ sis.read(dir);
+ assertEquals(2, sis.size());
+
+ FieldInfos fis1 = sis.info(0).getFieldInfos();
+ FieldInfos fis2 = sis.info(1).getFieldInfos();
+
+ // Same name -> same number in both segments, regardless of the order
+ // the fields were added to each document.
+ assertEquals("f1", fis1.fieldInfo(0).name);
+ assertEquals("f2", fis1.fieldInfo(1).name);
+ assertEquals("f1", fis2.fieldInfo(0).name);
+ assertEquals("f2", fis2.fieldInfo(1).name);
+ assertEquals("f3", fis2.fieldInfo(2).name);
+ assertEquals("f4", fis2.fieldInfo(3).name);
+
+ // Merge everything down to one segment; numbering must be unchanged.
+ writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
+ writer.optimize();
+ writer.close();
+
+ sis = new SegmentInfos();
+ sis.read(dir);
+ assertEquals(1, sis.size());
+
+ FieldInfos fis3 = sis.info(0).getFieldInfos();
+
+ assertEquals("f1", fis3.fieldInfo(0).name);
+ assertEquals("f2", fis3.fieldInfo(1).name);
+ assertEquals("f3", fis3.fieldInfo(2).name);
+ assertEquals("f4", fis3.fieldInfo(3).name);
+
+
+ dir.close();
+ }
+ }
+
+ @Test
+ public void testAddIndexes() throws Exception {
+ // Build two independent indexes whose documents declare overlapping
+ // fields in different orders, then fold dir2 into dir1 via addIndexes.
+ Directory dir1 = newDirectory();
+ Directory dir2 = newDirectory();
+ IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(NoMergePolicy.COMPOUND_FILES));
+
+ Document d1 = new Document();
+ d1.add(new Field("f1", "first field", Store.YES, Index.ANALYZED, TermVector.NO));
+ d1.add(new Field("f2", "second field", Store.YES, Index.ANALYZED, TermVector.NO));
+ writer.addDocument(d1);
+
+ writer.close();
+ writer = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(NoMergePolicy.COMPOUND_FILES));
+
+ Document d2 = new Document();
+ d2.add(new Field("f2", "second field", Store.YES, Index.ANALYZED, TermVector.NO));
+ d2.add(new Field("f1", "first field", Store.YES, Index.ANALYZED, TermVector.YES));
+ d2.add(new Field("f3", "third field", Store.YES, Index.ANALYZED, TermVector.NO));
+ d2.add(new Field("f4", "fourth field", Store.YES, Index.ANALYZED, TermVector.NO));
+ writer.addDocument(d2);
+
+ writer.close();
+
+ writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(NoMergePolicy.COMPOUND_FILES));
+ writer.addIndexes(dir2);
+ writer.close();
+
+ SegmentInfos sis = new SegmentInfos();
+ sis.read(dir1);
+ assertEquals(2, sis.size());
+
+ FieldInfos fis1 = sis.info(0).getFieldInfos();
+ FieldInfos fis2 = sis.info(1).getFieldInfos();
+
+ assertEquals("f1", fis1.fieldInfo(0).name);
+ assertEquals("f2", fis1.fieldInfo(1).name);
+ // make sure the ordering of the "external" segment is preserved
+ // (addIndexes copies the foreign segment's numbering as-is: f2 before f1)
+ assertEquals("f2", fis2.fieldInfo(0).name);
+ assertEquals("f1", fis2.fieldInfo(1).name);
+ assertEquals("f3", fis2.fieldInfo(2).name);
+ assertEquals("f4", fis2.fieldInfo(3).name);
+
+ writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
+ writer.optimize();
+ writer.close();
+
+ sis = new SegmentInfos();
+ sis.read(dir1);
+ assertEquals(1, sis.size());
+
+ FieldInfos fis3 = sis.info(0).getFieldInfos();
+
+ // after merging the ordering should be identical to the first segment
+ assertEquals("f1", fis3.fieldInfo(0).name);
+ assertEquals("f2", fis3.fieldInfo(1).name);
+ assertEquals("f3", fis3.fieldInfo(2).name);
+ assertEquals("f4", fis3.fieldInfo(3).name);
+
+ dir1.close();
+ dir2.close();
+ }
+
+ @Test
+ public void testManyFields() throws Exception {
+ // Randomized stress test: many docs, each with 4 randomly chosen fields
+ // out of MAX_FIELDS possible configurations, merged down to one segment.
+ final int NUM_DOCS = 2000;
+ final int MAX_FIELDS = 50;
+
+ // docs[i][j] is the field number used for the j-th field of doc i.
+ int[][] docs = new int[NUM_DOCS][4];
+ for (int i = 0; i < docs.length; i++) {
+ for (int j = 0; j < docs[i].length;j++) {
+ docs[i][j] = random.nextInt(MAX_FIELDS);
+ }
+ }
+
+ Directory dir = newDirectory();
+ IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
+
+ for (int i = 0; i < NUM_DOCS; i++) {
+ Document d = new Document();
+ for (int j = 0; j < docs[i].length; j++) {
+ d.add(getField(docs[i][j]));
+ }
+
+ writer.addDocument(d);
+ }
+
+ writer.optimize();
+ writer.close();
+
+ SegmentInfos sis = new SegmentInfos();
+ sis.read(dir);
+ for (SegmentInfo si : sis) {
+ FieldInfos fis = si.getFieldInfos();
+
+ // The field name is its number as a string (see getField), so each
+ // FieldInfo can be checked against the expected Field configuration.
+ for (FieldInfo fi : fis) {
+ Field expected = getField(Integer.parseInt(fi.name));
+ assertEquals(expected.isIndexed(), fi.isIndexed);
+ assertEquals(expected.isTermVectorStored(), fi.storeTermVector);
+ assertEquals(expected.isStorePositionWithTermVector(), fi.storePositionWithTermVector);
+ assertEquals(expected.isStoreOffsetWithTermVector(), fi.storeOffsetWithTermVector);
+ }
+ }
+
+ dir.close();
+ }
+
+ // Builds a deterministic Field for a given number: the field name is the
+ // number itself, and number % 16 selects one of the 16 combinations of
+ // Store {YES,NO} x Index {ANALYZED,NOT_ANALYZED} x TermVector
+ // {NO,WITH_OFFSETS,WITH_POSITIONS,WITH_POSITIONS_OFFSETS}.
+ // The default branch is unreachable for non-negative input (mode is 0..15).
+ private Field getField(int number) {
+ int mode = number % 16;
+ String fieldName = "" + number;
+ switch (mode) {
+ case 0: return new Field(fieldName, "some text", Store.YES, Index.ANALYZED, TermVector.NO);
+ case 1: return new Field(fieldName, "some text", Store.NO, Index.ANALYZED, TermVector.NO);
+ case 2: return new Field(fieldName, "some text", Store.YES, Index.NOT_ANALYZED, TermVector.NO);
+ case 3: return new Field(fieldName, "some text", Store.NO, Index.NOT_ANALYZED, TermVector.NO);
+ case 4: return new Field(fieldName, "some text", Store.YES, Index.ANALYZED, TermVector.WITH_OFFSETS);
+ case 5: return new Field(fieldName, "some text", Store.NO, Index.ANALYZED, TermVector.WITH_OFFSETS);
+ case 6: return new Field(fieldName, "some text", Store.YES, Index.NOT_ANALYZED, TermVector.WITH_OFFSETS);
+ case 7: return new Field(fieldName, "some text", Store.NO, Index.NOT_ANALYZED, TermVector.WITH_OFFSETS);
+ case 8: return new Field(fieldName, "some text", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS);
+ case 9: return new Field(fieldName, "some text", Store.NO, Index.ANALYZED, TermVector.WITH_POSITIONS);
+ case 10: return new Field(fieldName, "some text", Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS);
+ case 11: return new Field(fieldName, "some text", Store.NO, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS);
+ case 12: return new Field(fieldName, "some text", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS);
+ case 13: return new Field(fieldName, "some text", Store.NO, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS);
+ case 14: return new Field(fieldName, "some text", Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS);
+ case 15: return new Field(fieldName, "some text", Store.NO, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS);
+ default: return null;
+ }
+ }
+}
Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestDoc.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestDoc.java?rev=1073110&r1=1073109&r2=1073110&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestDoc.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestDoc.java Mon Feb 21 18:50:39 2011
@@ -204,8 +204,8 @@ public class TestDoc extends LuceneTestC
r2.close();
final SegmentInfo info = new SegmentInfo(merged, si1.docCount + si2.docCount, si1.dir,
- false, merger.fieldInfos().hasProx(), merger.getSegmentCodecs(),
- merger.fieldInfos().hasVectors());
+ false, merger.getSegmentCodecs(),
+ merger.fieldInfos());
if (useCompoundFile) {
Collection<String> filesToDelete = merger.createCompoundFile(merged + ".cfs", info);
Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java?rev=1073110&r1=1073109&r2=1073110&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java Mon Feb 21 18:50:39 2011
@@ -25,20 +25,20 @@ import org.apache.lucene.analysis.MockAn
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Fieldable;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.document.Fieldable;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.AttributeSource;
+import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
-import org.apache.lucene.util.BytesRef;
public class TestDocumentWriter extends LuceneTestCase {
private Directory dir;
@@ -98,8 +98,7 @@ public class TestDocumentWriter extends
// test that the norms are not present in the segment if
// omitNorms is true
- for (int i = 0; i < reader.core.fieldInfos.size(); i++) {
- FieldInfo fi = reader.core.fieldInfos.fieldInfo(i);
+ for (FieldInfo fi : reader.core.fieldInfos) {
if (fi.isIndexed) {
assertTrue(fi.omitNorms == !reader.hasNorms(fi.name));
}
Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java?rev=1073110&r1=1073109&r2=1073110&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java Mon Feb 21 18:50:39 2011
@@ -92,10 +92,9 @@ public class TestIndexFileDeleter extend
CompoundFileReader cfsReader = new CompoundFileReader(dir, "_2.cfs");
FieldInfos fieldInfos = new FieldInfos(cfsReader, "_2.fnm");
int contentFieldIndex = -1;
- for(i=0;i<fieldInfos.size();i++) {
- FieldInfo fi = fieldInfos.fieldInfo(i);
+ for (FieldInfo fi : fieldInfos) {
if (fi.name.equals("content")) {
- contentFieldIndex = i;
+ contentFieldIndex = fi.number;
break;
}
}
Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestPerFieldCodecSupport.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestPerFieldCodecSupport.java?rev=1073110&r1=1073109&r2=1073110&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestPerFieldCodecSupport.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestPerFieldCodecSupport.java Mon Feb 21 18:50:39 2011
@@ -216,7 +216,7 @@ public class TestPerFieldCodecSupport ex
IndexFileNames.FIELD_INFOS_EXTENSION));
FieldInfo fieldInfo = fieldInfos.fieldInfo(field);
assertEquals("faild for segment index: " + i, codec[i],
- codecInfo.codecs[fieldInfo.codecId]);
+ codecInfo.codecs[fieldInfo.getCodecId()]);
}
}
Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java?rev=1073110&r1=1073109&r2=1073110&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java Mon Feb 21 18:50:39 2011
@@ -79,8 +79,8 @@ public class TestSegmentMerger extends L
int docsMerged = merger.merge();
assertTrue(docsMerged == 2);
//Should be able to open a new SegmentReader against the new directory
- SegmentReader mergedReader = SegmentReader.get(false, mergedDir, new SegmentInfo(mergedSegment, docsMerged, mergedDir, false, merger.fieldInfos().hasProx(),
- merger.getSegmentCodecs(), merger.fieldInfos().hasVectors()),
+ SegmentReader mergedReader = SegmentReader.get(false, mergedDir, new SegmentInfo(mergedSegment, docsMerged, mergedDir, false,
+ merger.getSegmentCodecs(), merger.fieldInfos()),
BufferedIndexInput.BUFFER_SIZE, true, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
assertTrue(mergedReader != null);