You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by si...@apache.org on 2011/03/30 11:17:42 UTC
svn commit: r1086876 [9/18] - in /lucene/dev/branches/realtime_search: ./
dev-tools/eclipse/ dev-tools/idea/ dev-tools/idea/.idea/libraries/
dev-tools/idea/lucene/ dev-tools/idea/solr/
dev-tools/idea/solr/contrib/analysis-extras/ dev-tools/idea/solr/co...
Modified: lucene/dev/branches/realtime_search/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java?rev=1086876&r1=1086875&r2=1086876&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java Wed Mar 30 09:17:25 2011
@@ -83,9 +83,9 @@ import org.junit.runners.model.Initializ
* <code>super.tearDown()</code>
* </p>
*
- * @After - replaces setup
- * @Before - replaces teardown
- * @Test - any public method with this annotation is a test case, regardless
+ * <code>@Before</code> - replaces setup
+ * <code>@After</code> - replaces teardown
+ * <code>@Test</code> - any public method with this annotation is a test case, regardless
* of its name
* <p>
* <p>
@@ -148,6 +148,8 @@ public abstract class LuceneTestCase ext
public static final boolean TEST_NIGHTLY = Boolean.parseBoolean(System.getProperty("tests.nightly", "false"));
/** the line file used by LineFileDocs */
public static final String TEST_LINE_DOCS_FILE = System.getProperty("tests.linedocsfile", "europarl.lines.txt.gz");
+ /** whether or not to clean threads between test invocations: "false", "perMethod", "perClass" */
+ public static final String TEST_CLEAN_THREADS = System.getProperty("tests.cleanthreads", "perClass");
private static final Pattern codecWithParam = Pattern.compile("(.*)\\(\\s*(\\d+)\\s*\\)");
@@ -342,10 +344,12 @@ public abstract class LuceneTestCase ext
@AfterClass
public static void afterClassLuceneTestCaseJ4() {
- int rogueThreads = threadCleanup("test class");
- if (rogueThreads > 0) {
- // TODO: fail here once the leaks are fixed.
- System.err.println("RESOURCE LEAK: test class left " + rogueThreads + " thread(s) running");
+ if (! "false".equals(TEST_CLEAN_THREADS)) {
+ int rogueThreads = threadCleanup("test class");
+ if (rogueThreads > 0) {
+ // TODO: fail here once the leaks are fixed.
+ System.err.println("RESOURCE LEAK: test class left " + rogueThreads + " thread(s) running");
+ }
}
String codecDescription;
CodecProvider cp = CodecProvider.getDefault();
@@ -490,7 +494,7 @@ public abstract class LuceneTestCase ext
assertTrue("ensure your setUp() calls super.setUp()!!!", setup);
setup = false;
BooleanQuery.setMaxClauseCount(savedBoolMaxClauseCount);
- if (!getClass().getName().startsWith("org.apache.solr")) {
+ if ("perMethod".equals(TEST_CLEAN_THREADS)) {
int rogueThreads = threadCleanup("test method: '" + getName() + "'");
if (rogueThreads > 0) {
System.err.println("RESOURCE LEAK: test method: '" + getName()
@@ -534,7 +538,7 @@ public abstract class LuceneTestCase ext
}
}
- private final static int THREAD_STOP_GRACE_MSEC = 1000;
+ private final static int THREAD_STOP_GRACE_MSEC = 50;
// jvm-wide list of 'rogue threads' we found, so they only get reported once.
private final static IdentityHashMap<Thread,Boolean> rogueThreads = new IdentityHashMap<Thread,Boolean>();
@@ -586,10 +590,8 @@ public abstract class LuceneTestCase ext
// try to stop the thread:
t.setUncaughtExceptionHandler(null);
Thread.setDefaultUncaughtExceptionHandler(null);
- t.interrupt();
- try {
- t.join(THREAD_STOP_GRACE_MSEC);
- } catch (InterruptedException e) { e.printStackTrace(); }
+ if (!t.getName().startsWith("SyncThread")) // avoid zookeeper jre crash
+ t.interrupt();
}
}
}
@@ -612,7 +614,7 @@ public abstract class LuceneTestCase ext
* directly in the same scope as the IndexReader.
* </p>
*
- * @see FieldCacheSanityChecker
+ * @see org.apache.lucene.util.FieldCacheSanityChecker
*/
protected void assertSaneFieldCaches(final String msg) {
final CacheEntry[] entries = FieldCache.DEFAULT.getCacheEntries();
@@ -899,26 +901,50 @@ public abstract class LuceneTestCase ext
return dir;
}
+
+ /** Returns a new field instance.
+ * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */
public static Field newField(String name, String value, Index index) {
return newField(random, name, value, index);
}
+
+ /** Returns a new field instance.
+ * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */
public static Field newField(String name, String value, Store store, Index index) {
return newField(random, name, value, store, index);
}
-
+
+ /**
+ * Returns a new Field instance. Use this when the test does not
+ * care about some specific field settings (most tests)
+ * <ul>
+ * <li>If the store value is set to Store.NO, sometimes the field will be randomly stored.
+ * <li>More term vector data than you ask for might be indexed, for example if you choose YES
+ * it might index term vectors with offsets too.
+ * </ul>
+ */
public static Field newField(String name, String value, Store store, Index index, TermVector tv) {
return newField(random, name, value, store, index, tv);
}
+
+ /** Returns a new field instance, using the specified random.
+ * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */
public static Field newField(Random random, String name, String value, Index index) {
return newField(random, name, value, Store.NO, index);
}
+
+ /** Returns a new field instance, using the specified random.
+ * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */
public static Field newField(Random random, String name, String value, Store store, Index index) {
return newField(random, name, value, store, index, TermVector.NO);
}
+
+ /** Returns a new field instance, using the specified random.
+ * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */
public static Field newField(Random random, String name, String value, Store store, Index index, TermVector tv) {
if (!index.isIndexed())
return new Field(name, value, store, index);
Modified: lucene/dev/branches/realtime_search/lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java?rev=1086876&r1=1086875&r2=1086876&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java Wed Mar 30 09:17:25 2011
@@ -27,6 +27,7 @@ import java.io.OutputStream;
import java.io.PrintStream;
import java.lang.reflect.Method;
import java.util.Enumeration;
+import java.util.List;
import java.util.Random;
import java.util.Map;
import java.util.HashMap;
@@ -35,8 +36,11 @@ import java.util.zip.ZipFile;
import org.junit.Assert;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.CheckIndex;
import org.apache.lucene.index.ConcurrentMergeScheduler;
+import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.LogMergePolicy;
import org.apache.lucene.index.MergeScheduler;
@@ -325,4 +329,13 @@ public class _TestUtil {
throw new RuntimeException(e);
}
}
+
+ /** Adds field info for a Document. */
+ public static void add(Document doc, FieldInfos fieldInfos) {
+ List<Fieldable> fields = doc.getFields();
+ for (Fieldable field : fields) {
+ fieldInfos.addOrUpdate(field.name(), field.isIndexed(), field.isTermVectorStored(), field.isStorePositionWithTermVector(),
+ field.isStoreOffsetWithTermVector(), field.getOmitNorms(), false, field.getOmitTermFreqAndPositions());
+ }
+ }
}
Modified: lucene/dev/branches/realtime_search/lucene/src/test-framework/org/apache/lucene/util/automaton/AutomatonTestUtil.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test-framework/org/apache/lucene/util/automaton/AutomatonTestUtil.java?rev=1086876&r1=1086875&r2=1086876&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test-framework/org/apache/lucene/util/automaton/AutomatonTestUtil.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test-framework/org/apache/lucene/util/automaton/AutomatonTestUtil.java Wed Mar 30 09:17:25 2011
@@ -373,4 +373,28 @@ public class AutomatonTestUtil {
a.removeDeadTransitions();
}
+ /**
+ * Returns true if the language of this automaton is finite.
+ * <p>
+ * WARNING: this method is slow, it will blow up if the automaton is large.
+ * this is only used to test the correctness of our faster implementation.
+ */
+ public static boolean isFiniteSlow(Automaton a) {
+ if (a.isSingleton()) return true;
+ return isFiniteSlow(a.initial, new HashSet<State>());
+ }
+
+ /**
+ * Checks whether there is a loop containing s. (This is sufficient since
+ * there are never transitions to dead states.)
+ */
+ // TODO: not great that this is recursive... in theory a
+ // large automata could exceed java's stack
+ private static boolean isFiniteSlow(State s, HashSet<State> path) {
+ path.add(s);
+ for (Transition t : s.getTransitions())
+ if (path.contains(t.to) || !isFiniteSlow(t.to, path)) return false;
+ path.remove(s);
+ return true;
+ }
}
Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/analysis/TestNumericTokenStream.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/analysis/TestNumericTokenStream.java?rev=1086876&r1=1086875&r2=1086876&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/analysis/TestNumericTokenStream.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/analysis/TestNumericTokenStream.java Wed Mar 30 09:17:25 2011
@@ -35,13 +35,13 @@ public class TestNumericTokenStream exte
final TermToBytesRefAttribute bytesAtt = stream.getAttribute(TermToBytesRefAttribute.class);
final TypeAttribute typeAtt = stream.getAttribute(TypeAttribute.class);
final NumericTokenStream.NumericTermAttribute numericAtt = stream.getAttribute(NumericTokenStream.NumericTermAttribute.class);
- final BytesRef bytes = new BytesRef();
+ final BytesRef bytes = bytesAtt.getBytesRef();
stream.reset();
assertEquals(64, numericAtt.getValueSize());
for (int shift=0; shift<64; shift+=NumericUtils.PRECISION_STEP_DEFAULT) {
assertTrue("New token is available", stream.incrementToken());
assertEquals("Shift value wrong", shift, numericAtt.getShift());
- final int hash = bytesAtt.toBytesRef(bytes);
+ final int hash = bytesAtt.fillBytesRef();
assertEquals("Hash incorrect", bytes.hashCode(), hash);
assertEquals("Term is incorrectly encoded", lvalue & ~((1L << shift) - 1L), NumericUtils.prefixCodedToLong(bytes));
assertEquals("Term raw value is incorrectly encoded", lvalue & ~((1L << shift) - 1L), numericAtt.getRawValue());
@@ -58,13 +58,13 @@ public class TestNumericTokenStream exte
final TermToBytesRefAttribute bytesAtt = stream.getAttribute(TermToBytesRefAttribute.class);
final TypeAttribute typeAtt = stream.getAttribute(TypeAttribute.class);
final NumericTokenStream.NumericTermAttribute numericAtt = stream.getAttribute(NumericTokenStream.NumericTermAttribute.class);
- final BytesRef bytes = new BytesRef();
+ final BytesRef bytes = bytesAtt.getBytesRef();
stream.reset();
assertEquals(32, numericAtt.getValueSize());
for (int shift=0; shift<32; shift+=NumericUtils.PRECISION_STEP_DEFAULT) {
assertTrue("New token is available", stream.incrementToken());
assertEquals("Shift value wrong", shift, numericAtt.getShift());
- final int hash = bytesAtt.toBytesRef(bytes);
+ final int hash = bytesAtt.fillBytesRef();
assertEquals("Hash incorrect", bytes.hashCode(), hash);
assertEquals("Term is incorrectly encoded", ivalue & ~((1 << shift) - 1), NumericUtils.prefixCodedToInt(bytes));
assertEquals("Term raw value is incorrectly encoded", ((long) ivalue) & ~((1L << shift) - 1L), numericAtt.getRawValue());
Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/Test2BTerms.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/Test2BTerms.java?rev=1086876&r1=1086875&r2=1086876&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/Test2BTerms.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/Test2BTerms.java Wed Mar 30 09:17:25 2011
@@ -74,12 +74,14 @@ public class Test2BTerms extends LuceneT
}
private final static class MyTermAttributeImpl extends AttributeImpl implements TermToBytesRefAttribute {
- public int toBytesRef(BytesRef bs) {
- bs.bytes = bytes.bytes;
- bs.offset = bytes.offset;
- bs.length = bytes.length;
+ public int fillBytesRef() {
return bytes.hashCode();
}
+
+ public BytesRef getBytesRef() {
+ return bytes;
+ }
+
@Override
public void clear() {
}
Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java?rev=1086876&r1=1086875&r2=1086876&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java Wed Mar 30 09:17:25 2011
@@ -938,7 +938,41 @@ public class TestAddIndexes extends Luce
assertTrue(c.failures.size() == 0);
}
+
+ // LUCENE-2996: tests that addIndexes(IndexReader) applies existing deletes correctly.
+ public void testExistingDeletes() throws Exception {
+ Directory[] dirs = new Directory[2];
+ for (int i = 0; i < dirs.length; i++) {
+ dirs[i] = newDirectory();
+ IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer());
+ IndexWriter writer = new IndexWriter(dirs[i], conf);
+ Document doc = new Document();
+ doc.add(new Field("id", "myid", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
+ writer.addDocument(doc);
+ writer.close();
+ }
+
+ IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer());
+ IndexWriter writer = new IndexWriter(dirs[0], conf);
+ // Now delete the document
+ writer.deleteDocuments(new Term("id", "myid"));
+ IndexReader r = IndexReader.open(dirs[1]);
+ try {
+ writer.addIndexes(r);
+ } finally {
+ r.close();
+ }
+ writer.commit();
+ assertEquals("Documents from the incoming index should not have been deleted", 1, writer.numDocs());
+ writer.close();
+
+ for (Directory dir : dirs) {
+ dir.close();
+ }
+
+ }
+
private void addDocs3(IndexWriter writer, int numDocs) throws IOException {
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
@@ -1045,8 +1079,8 @@ public class TestAddIndexes extends Luce
IndexWriter w3 = new IndexWriter(dir, conf);
w3.addIndexes(readers);
w3.close();
-
- assertEquals("Only one compound segment should exist", 3, dir.listAll().length);
+ // we should now see segments_X, segments.gen,_Y.cfs, _Z.fnx
+ assertEquals("Only one compound segment should exist", 4, dir.listAll().length);
}
}
Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java?rev=1086876&r1=1086875&r2=1086876&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java Wed Mar 30 09:17:25 2011
@@ -37,6 +37,7 @@ import org.apache.lucene.search.FieldCac
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Similarity;
import org.apache.lucene.search.SimilarityProvider;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
@@ -515,8 +516,8 @@ public class TestBackwardsCompatibility
assertEquals("didn't delete the right number of documents", 1, delCount);
// Set one norm so we get a .s0 file:
- SimilarityProvider sim = new DefaultSimilarity();
- reader.setNorm(21, "content", sim.get("content").encodeNormValue(1.5f));
+ Similarity sim = new DefaultSimilarity();
+ reader.setNorm(21, "content", sim.encodeNormValue(1.5f));
reader.close();
// The numbering of fields can vary depending on which
@@ -542,7 +543,8 @@ public class TestBackwardsCompatibility
"_0_1.del",
"_0_1.s" + contentFieldIndex,
"segments_2",
- "segments.gen"};
+ "segments.gen",
+ "1.fnx"};
String[] actual = dir.listAll();
Arrays.sort(expected);
Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestCodecs.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestCodecs.java?rev=1086876&r1=1086875&r2=1086876&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestCodecs.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestCodecs.java Wed Mar 30 09:17:25 2011
@@ -80,7 +80,7 @@ public class TestCodecs extends LuceneTe
public FieldData(final String name, final FieldInfos fieldInfos, final TermData[] terms, final boolean omitTF, final boolean storePayloads) {
this.omitTF = omitTF;
this.storePayloads = storePayloads;
- fieldInfos.add(name, true);
+ fieldInfos.addOrUpdate(name, true);
fieldInfo = fieldInfos.fieldInfo(name);
fieldInfo.omitTermFreqAndPositions = omitTF;
fieldInfo.storePayloads = storePayloads;
@@ -240,7 +240,8 @@ public class TestCodecs extends LuceneTe
final Directory dir = newDirectory();
FieldInfos clonedFieldInfos = (FieldInfos) fieldInfos.clone();
this.write(fieldInfos, dir, fields, true);
- final SegmentInfo si = new SegmentInfo(SEGMENT, 10000, dir, false, SegmentCodecs.build(clonedFieldInfos, CodecProvider.getDefault()), clonedFieldInfos);
+ final SegmentInfo si = new SegmentInfo(SEGMENT, 10000, dir, false, true, clonedFieldInfos.buildSegmentCodecs(false), clonedFieldInfos.hasVectors(), clonedFieldInfos);
+ si.setHasProx(false);
final FieldsProducer reader = si.getSegmentCodecs().codec().fieldsProducer(new SegmentReadState(dir, si, fieldInfos, 64, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR));
@@ -292,7 +293,7 @@ public class TestCodecs extends LuceneTe
FieldInfos clonedFieldInfos = (FieldInfos) fieldInfos.clone();
this.write(fieldInfos, dir, fields, false);
- final SegmentInfo si = new SegmentInfo(SEGMENT, 10000, dir, false, SegmentCodecs.build(clonedFieldInfos, CodecProvider.getDefault()), clonedFieldInfos);
+ final SegmentInfo si = new SegmentInfo(SEGMENT, 10000, dir, false, true, clonedFieldInfos.buildSegmentCodecs(false), clonedFieldInfos.hasVectors(), clonedFieldInfos);
if (VERBOSE) {
System.out.println("TEST: now read postings");
@@ -441,7 +442,7 @@ public class TestCodecs extends LuceneTe
for(int iter=0;iter<NUM_TEST_ITER;iter++) {
final FieldData field = fields[TestCodecs.random.nextInt(fields.length)];
final TermsEnum termsEnum = termsDict.terms(field.fieldInfo.name).iterator();
-
+ assertTrue(field.fieldInfo.getCodecId() != FieldInfo.UNASSIGNED_CODEC_ID);
if (si.getSegmentCodecs().codecs[field.fieldInfo.getCodecId()] instanceof PreFlexCodec) {
// code below expects unicode sort order
continue;
@@ -590,12 +591,13 @@ public class TestCodecs extends LuceneTe
private void write(final FieldInfos fieldInfos, final Directory dir, final FieldData[] fields, boolean allowPreFlex) throws Throwable {
final int termIndexInterval = _TestUtil.nextInt(random, 13, 27);
- final SegmentCodecs codecInfo = SegmentCodecs.build(fieldInfos, CodecProvider.getDefault());
+ final SegmentCodecs codecInfo = fieldInfos.buildSegmentCodecs(false);
final SegmentWriteState state = new SegmentWriteState(null, dir, SEGMENT, fieldInfos, 10000, termIndexInterval, codecInfo, null);
final FieldsConsumer consumer = state.segmentCodecs.codec().fieldsConsumer(state);
Arrays.sort(fields);
for (final FieldData field : fields) {
+ assertTrue(field.fieldInfo.getCodecId() != FieldInfo.UNASSIGNED_CODEC_ID);
if (!allowPreFlex && codecInfo.codecs[field.fieldInfo.getCodecId()] instanceof PreFlexCodec) {
// code below expects unicode sort order
continue;
Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestDoc.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestDoc.java?rev=1086876&r1=1086875&r2=1086876&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestDoc.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestDoc.java Wed Mar 30 09:17:25 2011
@@ -202,10 +202,10 @@ public class TestDoc extends LuceneTestC
merger.merge();
r1.close();
r2.close();
-
+ final FieldInfos fieldInfos = merger.fieldInfos();
final SegmentInfo info = new SegmentInfo(merged, si1.docCount + si2.docCount, si1.dir,
- false, merger.getSegmentCodecs(),
- merger.fieldInfos());
+ false, fieldInfos.hasProx(), merger.getSegmentCodecs(),
+ fieldInfos.hasVectors(), fieldInfos);
if (useCompoundFile) {
Collection<String> filesToDelete = merger.createCompoundFile(merged + ".cfs", info);
Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestFieldInfos.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestFieldInfos.java?rev=1086876&r1=1086875&r2=1086876&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestFieldInfos.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestFieldInfos.java Wed Mar 30 09:17:25 2011
@@ -18,11 +18,13 @@ package org.apache.lucene.index;
*/
import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
import org.apache.lucene.document.Document;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexOutput;
import java.io.IOException;
+import java.util.Arrays;
//import org.cnlp.utils.properties.ResourceBundleHelper;
@@ -36,44 +38,124 @@ public class TestFieldInfos extends Luce
DocHelper.setupDoc(testDoc);
}
- public void test() throws IOException {
- //Positive test of FieldInfos
+ public FieldInfos createAndWriteFieldInfos(Directory dir, String filename) throws IOException{
+ //Positive test of FieldInfos
assertTrue(testDoc != null);
FieldInfos fieldInfos = new FieldInfos();
- fieldInfos.add(testDoc);
+ _TestUtil.add(testDoc, fieldInfos);
//Since the complement is stored as well in the fields map
assertTrue(fieldInfos.size() == DocHelper.all.size()); //this is all b/c we are using the no-arg constructor
- Directory dir = newDirectory();
- String name = "testFile";
- IndexOutput output = dir.createOutput(name);
+
+
+ IndexOutput output = dir.createOutput(filename);
assertTrue(output != null);
//Use a RAMOutputStream
-
- fieldInfos.write(output);
- output.close();
- assertTrue(dir.fileLength(name) > 0);
- FieldInfos readIn = new FieldInfos(dir, name);
- assertTrue(fieldInfos.size() == readIn.size());
- FieldInfo info = readIn.fieldInfo("textField1");
- assertTrue(info != null);
- assertTrue(info.storeTermVector == false);
- assertTrue(info.omitNorms == false);
-
- info = readIn.fieldInfo("textField2");
- assertTrue(info != null);
- assertTrue(info.storeTermVector == true);
- assertTrue(info.omitNorms == false);
-
- info = readIn.fieldInfo("textField3");
- assertTrue(info != null);
- assertTrue(info.storeTermVector == false);
- assertTrue(info.omitNorms == true);
-
- info = readIn.fieldInfo("omitNorms");
- assertTrue(info != null);
- assertTrue(info.storeTermVector == false);
- assertTrue(info.omitNorms == true);
+
+ fieldInfos.write(output);
+ output.close();
+ return fieldInfos;
+ }
+ public void test() throws IOException {
+ String name = "testFile";
+ Directory dir = newDirectory();
+ FieldInfos fieldInfos = createAndWriteFieldInfos(dir, name);
+ assertTrue(dir.fileLength(name) > 0);
+ FieldInfos readIn = new FieldInfos(dir, name);
+ assertTrue(fieldInfos.size() == readIn.size());
+ FieldInfo info = readIn.fieldInfo("textField1");
+ assertTrue(info != null);
+ assertTrue(info.storeTermVector == false);
+ assertTrue(info.omitNorms == false);
+
+ info = readIn.fieldInfo("textField2");
+ assertTrue(info != null);
+ assertTrue(info.storeTermVector == true);
+ assertTrue(info.omitNorms == false);
+
+ info = readIn.fieldInfo("textField3");
+ assertTrue(info != null);
+ assertTrue(info.storeTermVector == false);
+ assertTrue(info.omitNorms == true);
+
+ info = readIn.fieldInfo("omitNorms");
+ assertTrue(info != null);
+ assertTrue(info.storeTermVector == false);
+ assertTrue(info.omitNorms == true);
- dir.close();
+ dir.close();
+ }
+
+ public void testReadOnly() throws IOException {
+ String name = "testFile";
+ Directory dir = newDirectory();
+ FieldInfos fieldInfos = createAndWriteFieldInfos(dir, name);
+ FieldInfos readOnly = new FieldInfos(dir, name);
+ assertReadOnly(readOnly, fieldInfos);
+ FieldInfos readOnlyClone = (FieldInfos)readOnly.clone();
+ assertNotSame(readOnly, readOnlyClone);
+ // clone is also read only - no global field map
+ assertReadOnly(readOnlyClone, fieldInfos);
+ dir.close();
+ }
+
+ private void assertReadOnly(FieldInfos readOnly, FieldInfos modifiable) {
+ assertTrue(readOnly.isReadOnly());
+ assertFalse(modifiable.isReadOnly());
+ try {
+ readOnly.add(modifiable.fieldInfo(0));
+ fail("instance should be read only");
+ } catch (IllegalStateException e) {
+ // expected
+ }
+
+ try {
+ readOnly.addOrUpdate("bogus", random.nextBoolean());
+ fail("instance should be read only");
+ } catch (IllegalStateException e) {
+ // expected
+ }
+ try {
+ readOnly.addOrUpdate("bogus", random.nextBoolean(), random.nextBoolean());
+ fail("instance should be read only");
+ } catch (IllegalStateException e) {
+ // expected
+ }
+ try {
+ readOnly.addOrUpdate("bogus", random.nextBoolean(), random.nextBoolean(),
+ random.nextBoolean(), random.nextBoolean());
+ fail("instance should be read only");
+ } catch (IllegalStateException e) {
+ // expected
+ }
+ try {
+ readOnly.addOrUpdate("bogus", random.nextBoolean(), random.nextBoolean(),
+ random.nextBoolean(), random.nextBoolean(), random.nextBoolean());
+ fail("instance should be read only");
+ } catch (IllegalStateException e) {
+ // expected
+ }
+ try {
+ readOnly.addOrUpdate("bogus", random.nextBoolean(), random.nextBoolean(),
+ random.nextBoolean(), random.nextBoolean(), random.nextBoolean(),
+ random.nextBoolean(), random.nextBoolean());
+ fail("instance should be read only");
+ } catch (IllegalStateException e) {
+ // expected
+ }
+ try {
+ readOnly.addOrUpdate(Arrays.asList("a", "b", "c"), random.nextBoolean());
+ fail("instance should be read only");
+ } catch (IllegalStateException e) {
+ // expected
+ }
+
+ assertEquals(modifiable.size(), readOnly.size());
+ // assert we can iterate
+ for (FieldInfo fi : readOnly) {
+ assertEquals(fi.name, modifiable.fieldName(fi.number));
+ }
+
}
+
+
}
Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestFieldsReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestFieldsReader.java?rev=1086876&r1=1086875&r2=1086876&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestFieldsReader.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestFieldsReader.java Wed Mar 30 09:17:25 2011
@@ -49,7 +49,7 @@ public class TestFieldsReader extends Lu
super.setUp();
fieldInfos = new FieldInfos();
DocHelper.setupDoc(testDoc);
- fieldInfos.add(testDoc);
+ _TestUtil.add(testDoc, fieldInfos);
dir = newDirectory();
IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(newLogMergePolicy());
((LogMergePolicy) conf.getMergePolicy()).setUseCompoundFile(false);
Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java?rev=1086876&r1=1086875&r2=1086876&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java Wed Mar 30 09:17:25 2011
@@ -68,7 +68,7 @@ public class TestIndexFileDeleter extend
Term searchTerm = new Term("id", "7");
int delCount = reader.deleteDocuments(searchTerm);
assertEquals("didn't delete the right number of documents", 1, delCount);
- Similarity sim = new DefaultSimilarity().get("content");
+ Similarity sim = new DefaultSimilarity();
// Set one norm so we get a .s0 file:
reader.setNorm(21, "content", sim.encodeNormValue(1.5f));
reader.close();
Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexInput.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexInput.java?rev=1086876&r1=1086875&r2=1086876&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexInput.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexInput.java Wed Mar 30 09:17:25 2011
@@ -19,51 +19,60 @@ package org.apache.lucene.index;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.RAMDirectory;
import java.io.IOException;
public class TestIndexInput extends LuceneTestCase {
- public void testRead() throws IOException {
- IndexInput is = new MockIndexInput(new byte[] {
- (byte) 0x80, 0x01,
- (byte) 0xFF, 0x7F,
- (byte) 0x80, (byte) 0x80, 0x01,
- (byte) 0x81, (byte) 0x80, 0x01,
- 0x06, 'L', 'u', 'c', 'e', 'n', 'e',
-
- // 2-byte UTF-8 (U+00BF "INVERTED QUESTION MARK")
- 0x02, (byte) 0xC2, (byte) 0xBF,
- 0x0A, 'L', 'u', (byte) 0xC2, (byte) 0xBF,
- 'c', 'e', (byte) 0xC2, (byte) 0xBF,
- 'n', 'e',
-
- // 3-byte UTF-8 (U+2620 "SKULL AND CROSSBONES")
- 0x03, (byte) 0xE2, (byte) 0x98, (byte) 0xA0,
- 0x0C, 'L', 'u', (byte) 0xE2, (byte) 0x98, (byte) 0xA0,
- 'c', 'e', (byte) 0xE2, (byte) 0x98, (byte) 0xA0,
- 'n', 'e',
-
- // surrogate pairs
- // (U+1D11E "MUSICAL SYMBOL G CLEF")
- // (U+1D160 "MUSICAL SYMBOL EIGHTH NOTE")
- 0x04, (byte) 0xF0, (byte) 0x9D, (byte) 0x84, (byte) 0x9E,
- 0x08, (byte) 0xF0, (byte) 0x9D, (byte) 0x84, (byte) 0x9E,
- (byte) 0xF0, (byte) 0x9D, (byte) 0x85, (byte) 0xA0,
- 0x0E, 'L', 'u',
- (byte) 0xF0, (byte) 0x9D, (byte) 0x84, (byte) 0x9E,
- 'c', 'e',
- (byte) 0xF0, (byte) 0x9D, (byte) 0x85, (byte) 0xA0,
- 'n', 'e',
-
- // null bytes
- 0x01, 0x00,
- 0x08, 'L', 'u', 0x00, 'c', 'e', 0x00, 'n', 'e',
- });
-
+
+ static final byte[] READ_TEST_BYTES = new byte[] {
+ (byte) 0x80, 0x01,
+ (byte) 0xFF, 0x7F,
+ (byte) 0x80, (byte) 0x80, 0x01,
+ (byte) 0x81, (byte) 0x80, 0x01,
+ (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0x07,
+ (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0x07,
+ (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0x7F,
+ 0x06, 'L', 'u', 'c', 'e', 'n', 'e',
+
+ // 2-byte UTF-8 (U+00BF "INVERTED QUESTION MARK")
+ 0x02, (byte) 0xC2, (byte) 0xBF,
+ 0x0A, 'L', 'u', (byte) 0xC2, (byte) 0xBF,
+ 'c', 'e', (byte) 0xC2, (byte) 0xBF,
+ 'n', 'e',
+
+ // 3-byte UTF-8 (U+2620 "SKULL AND CROSSBONES")
+ 0x03, (byte) 0xE2, (byte) 0x98, (byte) 0xA0,
+ 0x0C, 'L', 'u', (byte) 0xE2, (byte) 0x98, (byte) 0xA0,
+ 'c', 'e', (byte) 0xE2, (byte) 0x98, (byte) 0xA0,
+ 'n', 'e',
+
+ // surrogate pairs
+ // (U+1D11E "MUSICAL SYMBOL G CLEF")
+ // (U+1D160 "MUSICAL SYMBOL EIGHTH NOTE")
+ 0x04, (byte) 0xF0, (byte) 0x9D, (byte) 0x84, (byte) 0x9E,
+ 0x08, (byte) 0xF0, (byte) 0x9D, (byte) 0x84, (byte) 0x9E,
+ (byte) 0xF0, (byte) 0x9D, (byte) 0x85, (byte) 0xA0,
+ 0x0E, 'L', 'u',
+ (byte) 0xF0, (byte) 0x9D, (byte) 0x84, (byte) 0x9E,
+ 'c', 'e',
+ (byte) 0xF0, (byte) 0x9D, (byte) 0x85, (byte) 0xA0,
+ 'n', 'e',
+
+ // null bytes
+ 0x01, 0x00,
+ 0x08, 'L', 'u', 0x00, 'c', 'e', 0x00, 'n', 'e',
+ };
+
+ private void checkReads(IndexInput is) throws IOException {
assertEquals(128,is.readVInt());
assertEquals(16383,is.readVInt());
assertEquals(16384,is.readVInt());
assertEquals(16385,is.readVInt());
+ assertEquals(Integer.MAX_VALUE, is.readVInt());
+ assertEquals((long) Integer.MAX_VALUE, is.readVLong());
+ assertEquals(Long.MAX_VALUE, is.readVLong());
assertEquals("Lucene",is.readString());
assertEquals("\u00BF",is.readString());
@@ -79,4 +88,24 @@ public class TestIndexInput extends Luce
assertEquals("\u0000",is.readString());
assertEquals("Lu\u0000ce\u0000ne",is.readString());
}
+
+ // this test only checks BufferedIndexInput because MockIndexInput extends BufferedIndexInput
+ public void testBufferedIndexInputRead() throws IOException {
+ final IndexInput is = new MockIndexInput(READ_TEST_BYTES);
+ checkReads(is);
+ is.close();
+ }
+
+ // this test checks the raw IndexInput methods as it uses RAMIndexInput which extends IndexInput directly
+ public void testRawIndexInputRead() throws IOException {
+ final RAMDirectory dir = new RAMDirectory();
+ final IndexOutput os = dir.createOutput("foo");
+ os.writeBytes(READ_TEST_BYTES, READ_TEST_BYTES.length);
+ os.close();
+ final IndexInput is = dir.openInput("foo");
+ checkReads(is);
+ is.close();
+ dir.close();
+ }
+
}
Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexReader.java?rev=1086876&r1=1086875&r2=1086876&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexReader.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexReader.java Wed Mar 30 09:17:25 2011
@@ -465,7 +465,7 @@ public class TestIndexReader extends Luc
// expected
}
- Similarity sim = new DefaultSimilarity().get("aaa");
+ Similarity sim = new DefaultSimilarity();
try {
reader.setNorm(5, "aaa", sim.encodeNormValue(2.0f));
fail("setNorm after close failed to throw IOException");
@@ -506,7 +506,7 @@ public class TestIndexReader extends Luc
// expected
}
- Similarity sim = new DefaultSimilarity().get("aaa");
+ Similarity sim = new DefaultSimilarity();
try {
reader.setNorm(5, "aaa", sim.encodeNormValue(2.0f));
fail("setNorm should have hit LockObtainFailedException");
@@ -538,7 +538,7 @@ public class TestIndexReader extends Luc
// now open reader & set norm for doc 0
IndexReader reader = IndexReader.open(dir, false);
- Similarity sim = new DefaultSimilarity().get("content");
+ Similarity sim = new DefaultSimilarity();
reader.setNorm(0, "content", sim.encodeNormValue(2.0f));
// we should be holding the write lock now:
@@ -583,7 +583,7 @@ public class TestIndexReader extends Luc
addDoc(writer, searchTerm.text());
writer.close();
- Similarity sim = new DefaultSimilarity().get("content");
+ Similarity sim = new DefaultSimilarity();
// now open reader & set norm for doc 0 (writes to
// _0_1.s0)
reader = IndexReader.open(dir, false);
@@ -969,7 +969,7 @@ public class TestIndexReader extends Luc
dir.setMaxSizeInBytes(thisDiskFree);
dir.setRandomIOExceptionRate(rate);
- Similarity sim = new DefaultSimilarity().get("content");
+ Similarity sim = new DefaultSimilarity();
try {
if (0 == x) {
int docId = 12;
@@ -1124,7 +1124,7 @@ public class TestIndexReader extends Luc
}
reader = IndexReader.open(dir, false);
- Similarity sim = new DefaultSimilarity().get("content");
+ Similarity sim = new DefaultSimilarity();
try {
reader.setNorm(1, "content", sim.encodeNormValue(2.0f));
fail("did not hit exception when calling setNorm on an invalid doc number");
Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java?rev=1086876&r1=1086875&r2=1086876&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java Wed Mar 30 09:17:25 2011
@@ -273,7 +273,7 @@ public class TestIndexReaderClone extend
* @throws Exception
*/
private void performDefaultTests(IndexReader r1) throws Exception {
- Similarity sim = new DefaultSimilarity().get("field1");
+ Similarity sim = new DefaultSimilarity();
float norm1 = sim.decodeNormValue(MultiNorms.norms(r1, "field1")[4]);
IndexReader pr1Clone = (IndexReader) r1.clone();
@@ -329,7 +329,7 @@ public class TestIndexReaderClone extend
TestIndexReaderReopen.createIndex(random, dir1, false);
SegmentReader origSegmentReader = getOnlySegmentReader(IndexReader.open(dir1, false));
origSegmentReader.deleteDocument(1);
- Similarity sim = new DefaultSimilarity().get("field1");
+ Similarity sim = new DefaultSimilarity();
origSegmentReader.setNorm(4, "field1", sim.encodeNormValue(0.5f));
SegmentReader clonedSegmentReader = (SegmentReader) origSegmentReader
@@ -429,7 +429,7 @@ public class TestIndexReaderClone extend
final Directory dir1 = newDirectory();
TestIndexReaderReopen.createIndex(random, dir1, false);
IndexReader orig = IndexReader.open(dir1, false);
- Similarity sim = new DefaultSimilarity().get("field1");
+ Similarity sim = new DefaultSimilarity();
orig.setNorm(1, "field1", sim.encodeNormValue(17.0f));
final byte encoded = sim.encodeNormValue(17.0f);
assertEquals(encoded, MultiNorms.norms(orig, "field1")[1]);
Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java?rev=1086876&r1=1086875&r2=1086876&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java Wed Mar 30 09:17:25 2011
@@ -31,6 +31,7 @@ import org.apache.lucene.document.Field.
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.SegmentReader.Norm;
import org.apache.lucene.search.DefaultSimilarity;
+import org.apache.lucene.search.DefaultSimilarityProvider;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.search.SimilarityProvider;
import org.apache.lucene.store.Directory;
@@ -41,17 +42,22 @@ import org.apache.lucene.util.LuceneTest
*/
public class TestIndexReaderCloneNorms extends LuceneTestCase {
- private class SimilarityOne extends DefaultSimilarity {
+ private class SimilarityProviderOne extends DefaultSimilarityProvider {
@Override
- public float computeNorm(FieldInvertState state) {
- // diable length norm
- return state.getBoost();
- }
+ public Similarity get(String field) {
+ return new DefaultSimilarity() {
+ @Override
+ public float computeNorm(FieldInvertState state) {
+ // disable length norm
+ return state.getBoost();
+ }
+ };
+ }
}
private static final int NUM_FIELDS = 10;
- private SimilarityProvider similarityOne;
+ private SimilarityProvider similarityProviderOne;
private Analyzer anlzr;
@@ -68,7 +74,7 @@ public class TestIndexReaderCloneNorms e
@Override
public void setUp() throws Exception {
super.setUp();
- similarityOne = new SimilarityOne();
+ similarityProviderOne = new SimilarityProviderOne();
anlzr = new MockAnalyzer();
}
@@ -211,7 +217,7 @@ public class TestIndexReaderCloneNorms e
IndexReader reader4C = (IndexReader) reader3C.clone();
SegmentReader segmentReader4C = getOnlySegmentReader(reader4C);
assertEquals(4, reader3CCNorm.bytesRef().get());
- Similarity sim = new DefaultSimilarity().get("field1");
+ Similarity sim = new DefaultSimilarity();
reader4C.setNorm(5, "field1", sim.encodeNormValue(0.33f));
// generate a cannot update exception in reader1
@@ -249,7 +255,7 @@ public class TestIndexReaderCloneNorms e
}
IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, anlzr).setOpenMode(OpenMode.CREATE)
- .setMaxBufferedDocs(5).setSimilarityProvider(similarityOne).setMergePolicy(newLogMergePolicy()));
+ .setMaxBufferedDocs(5).setSimilarityProvider(similarityProviderOne).setMergePolicy(newLogMergePolicy()));
LogMergePolicy lmp = (LogMergePolicy) iw.getConfig().getMergePolicy();
lmp.setMergeFactor(3);
@@ -272,7 +278,7 @@ public class TestIndexReaderCloneNorms e
// System.out.println(" and: for "+k+" from "+newNorm+" to "+origNorm);
modifiedNorms.set(i, Float.valueOf(newNorm));
modifiedNorms.set(k, Float.valueOf(origNorm));
- Similarity sim = new DefaultSimilarity().get("f" + 1);
+ Similarity sim = new DefaultSimilarity();
ir.setNorm(i, "f" + 1, sim.encodeNormValue(newNorm));
ir.setNorm(k, "f" + 1, sim.encodeNormValue(origNorm));
// System.out.println("setNorm i: "+i);
@@ -294,7 +300,7 @@ public class TestIndexReaderCloneNorms e
assertEquals("number of norms mismatches", numDocNorms, b.length);
ArrayList<Float> storedNorms = (i == 1 ? modifiedNorms : norms);
for (int j = 0; j < b.length; j++) {
- Similarity sim = new DefaultSimilarity().get(field);
+ Similarity sim = new DefaultSimilarity();
float norm = sim.decodeNormValue(b[j]);
float norm1 = storedNorms.get(j).floatValue();
assertEquals("stored norm value of " + field + " for doc " + j + " is "
@@ -307,7 +313,7 @@ public class TestIndexReaderCloneNorms e
throws IOException {
IndexWriterConfig conf = newIndexWriterConfig(
TEST_VERSION_CURRENT, anlzr).setOpenMode(OpenMode.APPEND)
- .setMaxBufferedDocs(5).setSimilarityProvider(similarityOne).setMergePolicy(newLogMergePolicy());
+ .setMaxBufferedDocs(5).setSimilarityProvider(similarityProviderOne).setMergePolicy(newLogMergePolicy());
LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
lmp.setMergeFactor(3);
lmp.setUseCompoundFile(compound);
@@ -334,7 +340,7 @@ public class TestIndexReaderCloneNorms e
// return unique norm values that are unchanged by encoding/decoding
private float nextNorm(String fname) {
float norm = lastNorm + normDelta;
- Similarity sim = new DefaultSimilarity().get(fname);
+ Similarity sim = new DefaultSimilarity();
do {
float norm1 = sim.decodeNormValue(
sim.encodeNormValue(norm));
Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java?rev=1086876&r1=1086875&r2=1086876&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java Wed Mar 30 09:17:25 2011
@@ -617,9 +617,9 @@ public class TestIndexReaderReopen exten
IndexReader reader2 = reader1.reopen();
modifier = IndexReader.open(dir1, false);
- SimilarityProvider sim = new DefaultSimilarity();
- modifier.setNorm(1, "field1", sim.get("field1").encodeNormValue(50f));
- modifier.setNorm(1, "field2", sim.get("field2").encodeNormValue(50f));
+ Similarity sim = new DefaultSimilarity();
+ modifier.setNorm(1, "field1", sim.encodeNormValue(50f));
+ modifier.setNorm(1, "field2", sim.encodeNormValue(50f));
modifier.close();
IndexReader reader3 = reader2.reopen();
@@ -712,7 +712,7 @@ public class TestIndexReaderReopen exten
protected void modifyIndex(int i) throws IOException {
if (i % 3 == 0) {
IndexReader modifier = IndexReader.open(dir, false);
- Similarity sim = new DefaultSimilarity().get("field1");
+ Similarity sim = new DefaultSimilarity();
modifier.setNorm(i, "field1", sim.encodeNormValue(50f));
modifier.close();
} else if (i % 3 == 1) {
@@ -993,10 +993,10 @@ public class TestIndexReaderReopen exten
}
case 1: {
IndexReader reader = IndexReader.open(dir, false);
- SimilarityProvider sim = new DefaultSimilarity();
- reader.setNorm(4, "field1", sim.get("field1").encodeNormValue(123f));
- reader.setNorm(44, "field2", sim.get("field2").encodeNormValue(222f));
- reader.setNorm(44, "field4", sim.get("field4").encodeNormValue(22f));
+ Similarity sim = new DefaultSimilarity();
+ reader.setNorm(4, "field1", sim.encodeNormValue(123f));
+ reader.setNorm(44, "field2", sim.encodeNormValue(222f));
+ reader.setNorm(44, "field4", sim.encodeNormValue(22f));
reader.close();
break;
}
@@ -1017,9 +1017,9 @@ public class TestIndexReaderReopen exten
}
case 4: {
IndexReader reader = IndexReader.open(dir, false);
- SimilarityProvider sim = new DefaultSimilarity();
- reader.setNorm(5, "field1", sim.get("field1").encodeNormValue(123f));
- reader.setNorm(55, "field2", sim.get("field2").encodeNormValue(222f));
+ Similarity sim = new DefaultSimilarity();
+ reader.setNorm(5, "field1", sim.encodeNormValue(123f));
+ reader.setNorm(55, "field2", sim.encodeNormValue(222f));
reader.close();
break;
}
Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java?rev=1086876&r1=1086875&r2=1086876&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java Wed Mar 30 09:17:25 2011
@@ -21,53 +21,57 @@ import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
+import java.io.Reader;
import java.io.StringReader;
-import java.util.List;
import java.util.ArrayList;
import java.util.Arrays;
-import java.util.Set;
-import java.util.HashSet;
+import java.util.Collections;
import java.util.HashMap;
+import java.util.HashSet;
import java.util.Iterator;
+import java.util.List;
import java.util.Map;
import java.util.Random;
-import java.util.Collections;
+import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
-import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Fieldable;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockFactory;
-import org.apache.lucene.store.NoLockFactory;
import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.NoLockFactory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.SingleInstanceLockFactory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.ThreadInterruptedException;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
-import org.apache.lucene.util.ThreadInterruptedException;
-import org.apache.lucene.util.BytesRef;
public class TestIndexWriter extends LuceneTestCase {
@@ -681,6 +685,122 @@ public class TestIndexWriter extends Luc
dir.close();
}
+ // Make sure it's OK to change RAM buffer size and
+ // maxBufferedDocs in a write session
+ public void testChangingRAMBuffer() throws IOException {
+ Directory dir = newDirectory();
+ IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ writer.getConfig().setMaxBufferedDocs(10);
+ writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+
+ int lastFlushCount = -1;
+ for(int j=1;j<52;j++) {
+ Document doc = new Document();
+ doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
+ writer.addDocument(doc);
+ _TestUtil.syncConcurrentMerges(writer);
+ int flushCount = writer.getFlushCount();
+ if (j == 1)
+ lastFlushCount = flushCount;
+ else if (j < 10)
+ // No new files should be created
+ assertEquals(flushCount, lastFlushCount);
+ else if (10 == j) {
+ assertTrue(flushCount > lastFlushCount);
+ lastFlushCount = flushCount;
+ writer.getConfig().setRAMBufferSizeMB(0.000001);
+ writer.getConfig().setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+ } else if (j < 20) {
+ assertTrue(flushCount > lastFlushCount);
+ lastFlushCount = flushCount;
+ } else if (20 == j) {
+ writer.getConfig().setRAMBufferSizeMB(16);
+ writer.getConfig().setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+ lastFlushCount = flushCount;
+ } else if (j < 30) {
+ assertEquals(flushCount, lastFlushCount);
+ } else if (30 == j) {
+ writer.getConfig().setRAMBufferSizeMB(0.000001);
+ writer.getConfig().setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+ } else if (j < 40) {
+ assertTrue(flushCount> lastFlushCount);
+ lastFlushCount = flushCount;
+ } else if (40 == j) {
+ writer.getConfig().setMaxBufferedDocs(10);
+ writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+ lastFlushCount = flushCount;
+ } else if (j < 50) {
+ assertEquals(flushCount, lastFlushCount);
+ writer.getConfig().setMaxBufferedDocs(10);
+ writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+ } else if (50 == j) {
+ assertTrue(flushCount > lastFlushCount);
+ }
+ }
+ writer.close();
+ dir.close();
+ }
+
+ public void testChangingRAMBuffer2() throws IOException {
+ Directory dir = newDirectory();
+ IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ writer.getConfig().setMaxBufferedDocs(10);
+ writer.getConfig().setMaxBufferedDeleteTerms(10);
+ writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+
+ for(int j=1;j<52;j++) {
+ Document doc = new Document();
+ doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
+ writer.addDocument(doc);
+ }
+
+ int lastFlushCount = -1;
+ for(int j=1;j<52;j++) {
+ writer.deleteDocuments(new Term("field", "aaa" + j));
+ _TestUtil.syncConcurrentMerges(writer);
+ int flushCount = writer.getFlushCount();
+ if (j == 1)
+ lastFlushCount = flushCount;
+ else if (j < 10) {
+ // No new files should be created
+ assertEquals(flushCount, lastFlushCount);
+ } else if (10 == j) {
+ assertTrue(flushCount > lastFlushCount);
+ lastFlushCount = flushCount;
+ writer.getConfig().setRAMBufferSizeMB(0.000001);
+ writer.getConfig().setMaxBufferedDeleteTerms(1);
+ } else if (j < 20) {
+ assertTrue(flushCount > lastFlushCount);
+ lastFlushCount = flushCount;
+ } else if (20 == j) {
+ writer.getConfig().setRAMBufferSizeMB(16);
+ writer.getConfig().setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+ lastFlushCount = flushCount;
+ } else if (j < 30) {
+ assertEquals(flushCount, lastFlushCount);
+ } else if (30 == j) {
+ writer.getConfig().setRAMBufferSizeMB(0.000001);
+ writer.getConfig().setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+ writer.getConfig().setMaxBufferedDeleteTerms(1);
+ } else if (j < 40) {
+ assertTrue(flushCount> lastFlushCount);
+ lastFlushCount = flushCount;
+ } else if (40 == j) {
+ writer.getConfig().setMaxBufferedDeleteTerms(10);
+ writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+ lastFlushCount = flushCount;
+ } else if (j < 50) {
+ assertEquals(flushCount, lastFlushCount);
+ writer.getConfig().setMaxBufferedDeleteTerms(10);
+ writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+ } else if (50 == j) {
+ assertTrue(flushCount > lastFlushCount);
+ }
+ }
+ writer.close();
+ dir.close();
+ }
+
public void testDiverseDocs() throws IOException {
MockDirectoryWrapper dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.5));
@@ -2549,7 +2669,7 @@ public class TestIndexWriter extends Luc
final Random r = random;
Directory dir = newDirectory();
- FlushCountingIndexWriter w = new FlushCountingIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, true, false)).setRAMBufferSizeMB(0.5).setMaxBufferedDocs(-1).setMaxBufferedDeleteTerms(-1));
+ FlushCountingIndexWriter w = new FlushCountingIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, true, false)).setRAMBufferSizeMB(1.0).setMaxBufferedDocs(-1).setMaxBufferedDeleteTerms(-1));
w.setInfoStream(VERBOSE ? System.out : null);
Document doc = new Document();
doc.add(newField("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
@@ -2576,7 +2696,7 @@ public class TestIndexWriter extends Luc
count++;
}
}
- assertTrue("flush happened too quickly during " + (doIndexing ? "indexing" : "deleting") + " count=" + count, count > 1500);
+ assertTrue("flush happened too quickly during " + (doIndexing ? "indexing" : "deleting") + " count=" + count, count > 3000);
}
w.close();
dir.close();
@@ -2912,4 +3032,123 @@ public class TestIndexWriter extends Luc
w.close();
dir.close();
}
+
+ private static class StringSplitAnalyzer extends Analyzer {
+ @Override
+ public TokenStream tokenStream(String fieldName, Reader reader) {
+ return new StringSplitTokenizer(reader);
+ }
+ }
+
+ private static class StringSplitTokenizer extends Tokenizer {
+ private final String[] tokens;
+ private int upto = 0;
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+
+ public StringSplitTokenizer(Reader r) {
+ try {
+ final StringBuilder b = new StringBuilder();
+ final char[] buffer = new char[1024];
+ int n;
+ while((n = r.read(buffer)) != -1) {
+ b.append(buffer, 0, n);
+ }
+ tokens = b.toString().split(" ");
+ } catch (IOException ioe) {
+ throw new RuntimeException(ioe);
+ }
+ }
+
+ @Override
+ public final boolean incrementToken() throws IOException {
+ clearAttributes();
+ if (upto < tokens.length) {
+ termAtt.setEmpty();
+ termAtt.append(tokens[upto]);
+ upto++;
+ return true;
+ } else {
+ return false;
+ }
+ }
+ }
+
+ /**
+ * Make sure we skip wicked long terms.
+ */
+ public void testWickedLongTerm() throws IOException {
+ Directory dir = newDirectory();
+ RandomIndexWriter w = new RandomIndexWriter(random, dir, new StringSplitAnalyzer());
+
+ char[] chars = new char[DocumentsWriterPerThread.MAX_TERM_LENGTH_UTF8];
+ Arrays.fill(chars, 'x');
+ Document doc = new Document();
+ final String bigTerm = new String(chars);
+ final BytesRef bigTermBytesRef = new BytesRef(bigTerm);
+
+ // This contents produces a too-long term:
+ // This content produces a too-long term:
+ doc.add(new Field("content", contents, Field.Store.NO, Field.Index.ANALYZED));
+ w.addDocument(doc);
+
+ // Make sure we can add another normal document
+ doc = new Document();
+ doc.add(new Field("content", "abc bbb ccc", Field.Store.NO, Field.Index.ANALYZED));
+ w.addDocument(doc);
+
+ IndexReader reader = w.getReader();
+ w.close();
+
+ // Make sure all terms < max size were indexed
+ assertEquals(2, reader.docFreq(new Term("content", "abc")));
+ assertEquals(1, reader.docFreq(new Term("content", "bbb")));
+ assertEquals(1, reader.docFreq(new Term("content", "term")));
+ assertEquals(1, reader.docFreq(new Term("content", "another")));
+
+ // Make sure position is still incremented when
+ // massive term is skipped:
+ DocsAndPositionsEnum tps = MultiFields.getTermPositionsEnum(reader, null, "content", new BytesRef("another"));
+ assertEquals(0, tps.nextDoc());
+ assertEquals(1, tps.freq());
+ assertEquals(3, tps.nextPosition());
+
+ // Make sure the doc that has the massive term is in
+ // the index:
+ assertEquals("document with wicked long term should be in the index!", 2, reader.numDocs());
+
+ reader.close();
+ dir.close();
+ dir = newDirectory();
+
+ // Make sure we can add a document with exactly the
+ // maximum length term, and search on that term:
+ doc = new Document();
+ Field contentField = new Field("content", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
+ doc.add(contentField);
+
+ w = new RandomIndexWriter(random, dir);
+
+ contentField.setValue("other");
+ w.addDocument(doc);
+
+ contentField.setValue("term");
+ w.addDocument(doc);
+
+ contentField.setValue(bigTerm);
+ w.addDocument(doc);
+
+ contentField.setValue("zzz");
+ w.addDocument(doc);
+
+ reader = w.getReader();
+ w.close();
+ assertEquals(1, reader.docFreq(new Term("content", bigTerm)));
+
+ FieldCache.DocTermsIndex dti = FieldCache.DEFAULT.getTermsIndex(reader, "content", random.nextBoolean());
+ assertEquals(5, dti.numOrd()); // +1 for null ord
+ assertEquals(4, dti.size());
+ assertEquals(bigTermBytesRef, dti.lookup(3, new BytesRef()));
+ reader.close();
+ dir.close();
+ }
}
Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexWriterConfig.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexWriterConfig.java?rev=1086876&r1=1086875&r2=1086876&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexWriterConfig.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexWriterConfig.java Wed Mar 30 09:17:25 2011
@@ -26,14 +26,14 @@ import java.util.Set;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.index.DocumentsWriterPerThread.IndexingChain;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
-import org.apache.lucene.search.DefaultSimilarity;
+import org.apache.lucene.search.DefaultSimilarityProvider;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.util.LuceneTestCase;
import org.junit.Test;
public class TestIndexWriterConfig extends LuceneTestCase {
- private static final class MySimilarity extends DefaultSimilarity {
+ private static final class MySimilarityProvider extends DefaultSimilarityProvider {
// Does not implement anything - used only for type checking on IndexWriterConfig.
}
@@ -179,8 +179,8 @@ public class TestIndexWriterConfig exten
// Test Similarity:
// we shouldnt assert what the default is, just that its not null.
assertTrue(IndexSearcher.getDefaultSimilarityProvider() == conf.getSimilarityProvider());
- conf.setSimilarityProvider(new MySimilarity());
- assertEquals(MySimilarity.class, conf.getSimilarityProvider().getClass());
+ conf.setSimilarityProvider(new MySimilarityProvider());
+ assertEquals(MySimilarityProvider.class, conf.getSimilarityProvider().getClass());
conf.setSimilarityProvider(null);
assertTrue(IndexSearcher.getDefaultSimilarityProvider() == conf.getSimilarityProvider());
Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java?rev=1086876&r1=1086875&r2=1086876&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java Wed Mar 30 09:17:25 2011
@@ -33,7 +33,9 @@ import org.apache.lucene.search.DocIdSet
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.MockDirectoryWrapper.Failure;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockAnalyzer;
@@ -781,7 +783,6 @@ public class TestIndexWriterExceptions e
}
}
}
-
((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
assertTrue(failure.didFail);
failure.clearDoFail();
@@ -795,53 +796,79 @@ public class TestIndexWriterExceptions e
private static class FailOnlyInCommit extends MockDirectoryWrapper.Failure {
- boolean fail1, fail2;
+ boolean failOnCommit, failOnDeleteFile;
+ private final boolean dontFailDuringGlobalFieldMap;
+ private static final String PREPARE_STAGE = "prepareCommit";
+ private static final String FINISH_STAGE = "finishCommit";
+ private final String stage;
+
+ public FailOnlyInCommit(boolean dontFailDuringGlobalFieldMap, String stage) {
+ this.dontFailDuringGlobalFieldMap = dontFailDuringGlobalFieldMap;
+ this.stage = stage;
+ }
@Override
public void eval(MockDirectoryWrapper dir) throws IOException {
StackTraceElement[] trace = new Exception().getStackTrace();
boolean isCommit = false;
boolean isDelete = false;
+ boolean isInGlobalFieldMap = false;
for (int i = 0; i < trace.length; i++) {
- if ("org.apache.lucene.index.SegmentInfos".equals(trace[i].getClassName()) && "prepareCommit".equals(trace[i].getMethodName()))
+ if ("org.apache.lucene.index.SegmentInfos".equals(trace[i].getClassName()) && stage.equals(trace[i].getMethodName()))
isCommit = true;
if ("org.apache.lucene.store.MockDirectoryWrapper".equals(trace[i].getClassName()) && "deleteFile".equals(trace[i].getMethodName()))
isDelete = true;
+ if ("org.apache.lucene.index.SegmentInfos".equals(trace[i].getClassName()) && "writeGlobalFieldMap".equals(trace[i].getMethodName()))
+ isInGlobalFieldMap = true;
+
+ }
+ if (isInGlobalFieldMap && dontFailDuringGlobalFieldMap) {
+ isCommit = false;
}
-
if (isCommit) {
if (!isDelete) {
- fail1 = true;
+ failOnCommit = true;
throw new RuntimeException("now fail first");
} else {
- fail2 = true;
+ failOnDeleteFile = true;
throw new IOException("now fail during delete");
}
}
}
}
- // LUCENE-1214
public void testExceptionsDuringCommit() throws Throwable {
- MockDirectoryWrapper dir = newDirectory();
- FailOnlyInCommit failure = new FailOnlyInCommit();
- IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
- Document doc = new Document();
- doc.add(newField("field", "a field", Field.Store.YES,
- Field.Index.ANALYZED));
- w.addDocument(doc);
- dir.failOn(failure);
- try {
- w.close();
- fail();
- } catch (IOException ioe) {
- fail("expected only RuntimeException");
- } catch (RuntimeException re) {
- // Expected
+ FailOnlyInCommit[] failures = new FailOnlyInCommit[] {
+ // LUCENE-1214
+ new FailOnlyInCommit(false, FailOnlyInCommit.PREPARE_STAGE), // fail during global field map is written
+ new FailOnlyInCommit(true, FailOnlyInCommit.PREPARE_STAGE), // fail after global field map is written
+ new FailOnlyInCommit(false, FailOnlyInCommit.FINISH_STAGE) // fail while running finishCommit
+ };
+
+ for (FailOnlyInCommit failure : failures) {
+ MockDirectoryWrapper dir = newDirectory();
+ IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
+ TEST_VERSION_CURRENT, new MockAnalyzer()));
+ Document doc = new Document();
+ doc.add(newField("field", "a field", Field.Store.YES,
+ Field.Index.ANALYZED));
+ w.addDocument(doc);
+ dir.failOn(failure);
+ try {
+ w.close();
+ fail();
+ } catch (IOException ioe) {
+ fail("expected only RuntimeException");
+ } catch (RuntimeException re) {
+ // Expected
+ }
+ assertTrue(dir.fileExists("1.fnx"));
+ assertTrue(failure.failOnCommit && failure.failOnDeleteFile);
+ w.rollback();
+ assertFalse(dir.fileExists("1.fnx"));
+ // FIXME: on windows, this often fails! assertEquals(0, dir.listAll().length);
+ dir.close();
}
- assertTrue(failure.fail1 && failure.fail2);
- w.rollback();
- dir.close();
}
public void testOptimizeExceptions() throws IOException {
Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java?rev=1086876&r1=1086875&r2=1086876&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java Wed Mar 30 09:17:25 2011
@@ -236,7 +236,7 @@ public class TestIndexWriterMergePolicy
int segmentCount = writer.getSegmentCount();
for (int i = segmentCount - 1; i >= 0; i--) {
int docCount = writer.getDocCount(i);
- assertTrue(docCount > lowerBound);
+ assertTrue("docCount=" + docCount + " lowerBound=" + lowerBound + " i=" + i + " segmentCount=" + segmentCount + " index=" + writer.segString(), docCount > lowerBound);
if (docCount <= upperBound) {
numSegments++;
Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestMaxTermFrequency.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestMaxTermFrequency.java?rev=1086876&r1=1086875&r2=1086876&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestMaxTermFrequency.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestMaxTermFrequency.java Wed Mar 30 09:17:25 2011
@@ -27,6 +27,8 @@ import org.apache.lucene.analysis.MockTo
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.search.DefaultSimilarity;
+import org.apache.lucene.search.DefaultSimilarityProvider;
+import org.apache.lucene.search.Similarity;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
@@ -46,7 +48,12 @@ public class TestMaxTermFrequency extend
dir = newDirectory();
IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(MockTokenizer.SIMPLE, true)).setMergePolicy(newInOrderLogMergePolicy());
- config.setSimilarityProvider(new TestSimilarity());
+ config.setSimilarityProvider(new DefaultSimilarityProvider() {
+ @Override
+ public Similarity get(String field) {
+ return new TestSimilarity();
+ }
+ });
RandomIndexWriter writer = new RandomIndexWriter(random, dir, config);
Document doc = new Document();
Field foo = newField("foo", "", Field.Store.NO, Field.Index.ANALYZED);
Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestNorms.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestNorms.java?rev=1086876&r1=1086875&r2=1086876&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestNorms.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestNorms.java Wed Mar 30 09:17:25 2011
@@ -29,6 +29,7 @@ import org.apache.lucene.document.Field.
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DefaultSimilarity;
+import org.apache.lucene.search.DefaultSimilarityProvider;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.search.SimilarityProvider;
import org.apache.lucene.store.Directory;
@@ -40,17 +41,22 @@ import org.apache.lucene.util.LuceneTest
*/
public class TestNorms extends LuceneTestCase {
- private class SimilarityOne extends DefaultSimilarity {
+ private class SimilarityProviderOne extends DefaultSimilarityProvider {
@Override
- public float computeNorm(FieldInvertState state) {
- // Disable length norm
- return state.getBoost();
- }
+ public Similarity get(String field) {
+ return new DefaultSimilarity() {
+ @Override
+ public float computeNorm(FieldInvertState state) {
+ // disable length norm
+ return state.getBoost();
+ }
+ };
+ }
}
private static final int NUM_FIELDS = 10;
- private SimilarityProvider similarityOne;
+ private SimilarityProvider similarityProviderOne;
private Analyzer anlzr;
private int numDocNorms;
private ArrayList<Float> norms;
@@ -61,7 +67,7 @@ public class TestNorms extends LuceneTes
@Override
public void setUp() throws Exception {
super.setUp();
- similarityOne = new SimilarityOne();
+ similarityProviderOne = new SimilarityProviderOne();
anlzr = new MockAnalyzer();
}
@@ -152,7 +158,7 @@ public class TestNorms extends LuceneTes
private void createIndex(Random random, Directory dir) throws IOException {
IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, anlzr).setOpenMode(OpenMode.CREATE)
- .setMaxBufferedDocs(5).setSimilarityProvider(similarityOne).setMergePolicy(newInOrderLogMergePolicy()));
+ .setMaxBufferedDocs(5).setSimilarityProvider(similarityProviderOne).setMergePolicy(newInOrderLogMergePolicy()));
LogMergePolicy lmp = (LogMergePolicy) iw.getConfig().getMergePolicy();
lmp.setMergeFactor(3);
lmp.setUseCompoundFile(true);
@@ -170,7 +176,7 @@ public class TestNorms extends LuceneTes
//System.out.println(" and: for "+k+" from "+newNorm+" to "+origNorm);
modifiedNorms.set(i, Float.valueOf(newNorm));
modifiedNorms.set(k, Float.valueOf(origNorm));
- Similarity sim = new DefaultSimilarity().get("f"+1);
+ Similarity sim = new DefaultSimilarity();
ir.setNorm(i, "f"+1, sim.encodeNormValue(newNorm));
ir.setNorm(k, "f"+1, sim.encodeNormValue(origNorm));
}
@@ -186,7 +192,7 @@ public class TestNorms extends LuceneTes
assertEquals("number of norms mismatches",numDocNorms,b.length);
ArrayList<Float> storedNorms = (i==1 ? modifiedNorms : norms);
for (int j = 0; j < b.length; j++) {
- float norm = similarityOne.get(field).decodeNormValue(b[j]);
+ float norm = similarityProviderOne.get(field).decodeNormValue(b[j]);
float norm1 = storedNorms.get(j).floatValue();
assertEquals("stored norm value of "+field+" for doc "+j+" is "+norm+" - a mismatch!", norm, norm1, 0.000001);
}
@@ -197,7 +203,7 @@ public class TestNorms extends LuceneTes
private void addDocs(Random random, Directory dir, int ndocs, boolean compound) throws IOException {
IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, anlzr).setOpenMode(OpenMode.APPEND)
- .setMaxBufferedDocs(5).setSimilarityProvider(similarityOne).setMergePolicy(newInOrderLogMergePolicy()));
+ .setMaxBufferedDocs(5).setSimilarityProvider(similarityProviderOne).setMergePolicy(newInOrderLogMergePolicy()));
LogMergePolicy lmp = (LogMergePolicy) iw.getConfig().getMergePolicy();
lmp.setMergeFactor(3);
lmp.setUseCompoundFile(compound);
@@ -222,7 +228,7 @@ public class TestNorms extends LuceneTes
// return unique norm values that are unchanged by encoding/decoding
private float nextNorm(String fname) {
float norm = lastNorm + normDelta;
- Similarity similarity = similarityOne.get(fname);
+ Similarity similarity = similarityProviderOne.get(fname);
do {
float norm1 = similarity.decodeNormValue(similarity.encodeNormValue(norm));
if (norm1 > lastNorm) {
@@ -261,7 +267,12 @@ public class TestNorms extends LuceneTes
public void testCustomEncoder() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer());
- config.setSimilarityProvider(new CustomNormEncodingSimilarity());
+ config.setSimilarityProvider(new DefaultSimilarityProvider() {
+ @Override
+ public Similarity get(String field) {
+ return new CustomNormEncodingSimilarity();
+ }
+ });
RandomIndexWriter writer = new RandomIndexWriter(random, dir, config);
Document doc = new Document();
Field foo = newField("foo", "", Field.Store.NO, Field.Index.ANALYZED);
Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestOmitTf.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestOmitTf.java?rev=1086876&r1=1086875&r2=1086876&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestOmitTf.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestOmitTf.java Wed Mar 30 09:17:25 2011
@@ -35,27 +35,29 @@ import org.apache.lucene.search.Explanat
public class TestOmitTf extends LuceneTestCase {
- public static class SimpleSimilarity extends Similarity implements SimilarityProvider {
- @Override public float computeNorm(FieldInvertState state) { return state.getBoost(); }
- @Override public float tf(float freq) { return freq; }
- @Override public float sloppyFreq(int distance) { return 2.0f; }
- @Override public float idf(int docFreq, int numDocs) { return 1.0f; }
- @Override public IDFExplanation idfExplain(Collection<Term> terms, IndexSearcher searcher) throws IOException {
- return new IDFExplanation() {
- @Override
- public float getIdf() {
- return 1.0f;
- }
- @Override
- public String explain() {
- return "Inexplicable";
- }
- };
- }
+ public static class SimpleSimilarityProvider implements SimilarityProvider {
public float queryNorm(float sumOfSquaredWeights) { return 1.0f; }
public float coord(int overlap, int maxOverlap) { return 1.0f; }
public Similarity get(String field) {
- return this;
+ return new Similarity() {
+
+ @Override public float computeNorm(FieldInvertState state) { return state.getBoost(); }
+ @Override public float tf(float freq) { return freq; }
+ @Override public float sloppyFreq(int distance) { return 2.0f; }
+ @Override public float idf(int docFreq, int numDocs) { return 1.0f; }
+ @Override public IDFExplanation idfExplain(Collection<Term> terms, IndexSearcher searcher) throws IOException {
+ return new IDFExplanation() {
+ @Override
+ public float getIdf() {
+ return 1.0f;
+ }
+ @Override
+ public String explain() {
+ return "Inexplicable";
+ }
+ };
+ }
+ };
}
}
@@ -254,7 +256,7 @@ public class TestOmitTf extends LuceneTe
dir,
newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer).
setMaxBufferedDocs(2).
- setSimilarityProvider(new SimpleSimilarity()).
+ setSimilarityProvider(new SimpleSimilarityProvider()).
setMergePolicy(newInOrderLogMergePolicy(2))
);
writer.setInfoStream(VERBOSE ? System.out : null);
@@ -285,7 +287,7 @@ public class TestOmitTf extends LuceneTe
* Verify the index
*/
IndexSearcher searcher = new IndexSearcher(dir, true);
- searcher.setSimilarityProvider(new SimpleSimilarity());
+ searcher.setSimilarityProvider(new SimpleSimilarityProvider());
Term a = new Term("noTf", term);
Term b = new Term("tf", term);
Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestParallelReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestParallelReader.java?rev=1086876&r1=1086875&r2=1086876&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestParallelReader.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestParallelReader.java Wed Mar 30 09:17:25 2011
@@ -149,8 +149,8 @@ public class TestParallelReader extends
assertTrue(pr.isCurrent());
IndexReader modifier = IndexReader.open(dir1, false);
- SimilarityProvider sim = new DefaultSimilarity();
- modifier.setNorm(0, "f1", sim.get("f1").encodeNormValue(100f));
+ Similarity sim = new DefaultSimilarity();
+ modifier.setNorm(0, "f1", sim.encodeNormValue(100f));
modifier.close();
// one of the two IndexReaders which ParallelReader is using
@@ -158,7 +158,7 @@ public class TestParallelReader extends
assertFalse(pr.isCurrent());
modifier = IndexReader.open(dir2, false);
- modifier.setNorm(0, "f3", sim.get("f3").encodeNormValue(100f));
+ modifier.setNorm(0, "f3", sim.encodeNormValue(100f));
modifier.close();
// now both are not current anymore
Modified: lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java?rev=1086876&r1=1086875&r2=1086876&view=diff
==============================================================================
--- lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java (original)
+++ lucene/dev/branches/realtime_search/lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java Wed Mar 30 09:17:25 2011
@@ -78,9 +78,10 @@ public class TestSegmentMerger extends L
merger.add(reader2);
int docsMerged = merger.merge();
assertTrue(docsMerged == 2);
+ final FieldInfos fieldInfos = merger.fieldInfos();
//Should be able to open a new SegmentReader against the new directory
- SegmentReader mergedReader = SegmentReader.get(false, mergedDir, new SegmentInfo(mergedSegment, docsMerged, mergedDir, false,
- merger.getSegmentCodecs(), merger.fieldInfos()),
+ SegmentReader mergedReader = SegmentReader.get(false, mergedDir, new SegmentInfo(mergedSegment, docsMerged, mergedDir, false, fieldInfos.hasProx(),
+ merger.getSegmentCodecs(), fieldInfos.hasVectors(), fieldInfos),
BufferedIndexInput.BUFFER_SIZE, true, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
assertTrue(mergedReader != null);
assertTrue(mergedReader.numDocs() == 2);