You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by us...@apache.org on 2012/01/31 01:33:20 UTC
svn commit: r1238112 - in /lucene/dev/trunk:
lucene/src/java/org/apache/lucene/util/
lucene/src/test-framework/java/org/apache/lucene/search/
lucene/src/test/org/apache/lucene/util/
modules/grouping/src/test/org/apache/lucene/search/grouping/
Author: uschindler
Date: Tue Jan 31 00:33:19 2012
New Revision: 1238112
URL: http://svn.apache.org/viewvc?rev=1238112&view=rev
Log:
LUCENE-2858: Fix remaining TODO: Re-add FieldCache insanity checking, got lost as tricky to implement
Modified:
lucene/dev/trunk/lucene/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java
lucene/dev/trunk/lucene/src/test-framework/java/org/apache/lucene/search/CheckHits.java
lucene/dev/trunk/lucene/src/test-framework/java/org/apache/lucene/search/QueryUtils.java
lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java
lucene/dev/trunk/modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java
lucene/dev/trunk/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
Modified: lucene/dev/trunk/lucene/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java?rev=1238112&r1=1238111&r2=1238112&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java (original)
+++ lucene/dev/trunk/lucene/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java Tue Jan 31 00:33:19 2012
@@ -24,6 +24,8 @@ import java.util.Map;
import java.util.Set;
import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.CompositeReader;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.FieldCache.CacheEntry;
@@ -146,6 +148,9 @@ public final class FieldCacheSanityCheck
insanity.addAll(checkValueMismatch(valIdToItems,
readerFieldToValIds,
valMismatchKeys));
+ insanity.addAll(checkSubreaders(valIdToItems,
+ readerFieldToValIds));
+
return insanity.toArray(new Insanity[insanity.size()]);
}
@@ -186,6 +191,107 @@ public final class FieldCacheSanityCheck
return insanity;
}
+ /**
+ * Internal helper method used by check that iterates over
+ * the keys of readerFieldToValIds and generates a Collection
+ * of Insanity instances whenever two (or more) ReaderField instances are
+ * found that have an ancestry relationship.
+ *
+ * @see InsanityType#SUBREADER
+ */
+ private Collection<Insanity> checkSubreaders( MapOfSets<Integer, CacheEntry> valIdToItems,
+ MapOfSets<ReaderField, Integer> readerFieldToValIds) {
+
+ final List<Insanity> insanity = new ArrayList<Insanity>(23);
+
+ Map<ReaderField, Set<ReaderField>> badChildren = new HashMap<ReaderField, Set<ReaderField>>(17);
+ MapOfSets<ReaderField, ReaderField> badKids = new MapOfSets<ReaderField, ReaderField>(badChildren); // wrapper
+
+ Map<Integer, Set<CacheEntry>> viToItemSets = valIdToItems.getMap();
+ Map<ReaderField, Set<Integer>> rfToValIdSets = readerFieldToValIds.getMap();
+
+ Set<ReaderField> seen = new HashSet<ReaderField>(17);
+
+ Set<ReaderField> readerFields = rfToValIdSets.keySet();
+ for (final ReaderField rf : readerFields) {
+
+ if (seen.contains(rf)) continue;
+
+ List<Object> kids = getAllDescendantReaderKeys(rf.readerKey);
+ for (Object kidKey : kids) {
+ ReaderField kid = new ReaderField(kidKey, rf.fieldName);
+
+ if (badChildren.containsKey(kid)) {
+ // we've already processed this kid as an RF and found other problems
+ // track those problems as our own
+ badKids.put(rf, kid);
+ badKids.putAll(rf, badChildren.get(kid));
+ badChildren.remove(kid);
+
+ } else if (rfToValIdSets.containsKey(kid)) {
+ // we have cache entries for the kid
+ badKids.put(rf, kid);
+ }
+ seen.add(kid);
+ }
+ seen.add(rf);
+ }
+
+ // every mapping in badKids represents an Insanity
+ for (final ReaderField parent : badChildren.keySet()) {
+ Set<ReaderField> kids = badChildren.get(parent);
+
+ List<CacheEntry> badEntries = new ArrayList<CacheEntry>(kids.size() * 2);
+
+ // put parent entr(ies) in first
+ {
+ for (final Integer value : rfToValIdSets.get(parent)) {
+ badEntries.addAll(viToItemSets.get(value));
+ }
+ }
+
+ // now the entries for the descendants
+ for (final ReaderField kid : kids) {
+ for (final Integer value : rfToValIdSets.get(kid)) {
+ badEntries.addAll(viToItemSets.get(value));
+ }
+ }
+
+ CacheEntry[] badness = new CacheEntry[badEntries.size()];
+ badness = badEntries.toArray(badness);
+
+ insanity.add(new Insanity(InsanityType.SUBREADER,
+ "Found caches for descendants of " +
+ parent.toString(),
+ badness));
+ }
+
+ return insanity;
+
+ }
+
+ /**
+ * Checks if the seed is an IndexReader, and if so will walk
+ * the hierarchy of subReaders building up a list of the objects
+ * returned by obj.getCoreCacheKey()
+ */
+ private List<Object> getAllDescendantReaderKeys(Object seed) {
+ List<Object> all = new ArrayList<Object>(17); // will grow as we iter
+ all.add(seed);
+ for (int i = 0; i < all.size(); i++) {
+ Object obj = all.get(i);
+ if (obj instanceof CompositeReader) {
+ IndexReader[] subs = ((CompositeReader)obj).getSequentialSubReaders();
+ for (int j = 0; (null != subs) && (j < subs.length); j++) {
+ all.add(subs[j].getCoreCacheKey());
+ }
+ }
+
+ }
+ // need to skip the first, because it was the seed
+ return all.subList(1, all.size());
+ }
+
/**
* Simple pair object for using "readerKey + fieldName" a Map key
*/
Modified: lucene/dev/trunk/lucene/src/test-framework/java/org/apache/lucene/search/CheckHits.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test-framework/java/org/apache/lucene/search/CheckHits.java?rev=1238112&r1=1238111&r2=1238112&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test-framework/java/org/apache/lucene/search/CheckHits.java (original)
+++ lucene/dev/trunk/lucene/src/test-framework/java/org/apache/lucene/search/CheckHits.java Tue Jan 31 00:33:19 2012
@@ -116,7 +116,7 @@ public class CheckHits {
Assert.assertEquals("Wrap Reader " + i + ": " +
query.toString(defaultFieldName),
correct, actual);
- // TODO: FieldCache.DEFAULT.purge(s.getIndexReader()); // our wrapping can create insanity otherwise
+ QueryUtils.purgeFieldCache(s.getIndexReader()); // our wrapping can create insanity otherwise
}
}
Modified: lucene/dev/trunk/lucene/src/test-framework/java/org/apache/lucene/search/QueryUtils.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test-framework/java/org/apache/lucene/search/QueryUtils.java?rev=1238112&r1=1238111&r2=1238112&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test-framework/java/org/apache/lucene/search/QueryUtils.java (original)
+++ lucene/dev/trunk/lucene/src/test-framework/java/org/apache/lucene/search/QueryUtils.java Tue Jan 31 00:33:19 2012
@@ -31,6 +31,7 @@ import org.apache.lucene.index.Directory
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.MultiReader;
+import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;
@@ -116,14 +117,11 @@ public class QueryUtils {
if (wrap) {
IndexSearcher wrapped;
check(random, q1, wrapped = wrapUnderlyingReader(random, s, -1), false);
- // TODO: I removed that as we can never get insanity by composite readers anymore... Is this ok?
- //FieldCache.DEFAULT.purge(wrapped.getIndexReader()); // our wrapping can create insanity otherwise
+ purgeFieldCache(wrapped.getIndexReader()); // our wrapping can create insanity otherwise
check(random, q1, wrapped = wrapUnderlyingReader(random, s, 0), false);
- // TODO: I removed that as we can never get insanity by composite readers anymore... Is this ok?
- //FieldCache.DEFAULT.purge(wrapped.getIndexReader()); // our wrapping can create insanity otherwise
+ purgeFieldCache(wrapped.getIndexReader()); // our wrapping can create insanity otherwise
check(random, q1, wrapped = wrapUnderlyingReader(random, s, +1), false);
- // TODO: I removed that as we can never get insanity by composite readers anymore... Is this ok?
- //FieldCache.DEFAULT.purge(wrapped.getIndexReader()); // our wrapping can create insanity otherwise
+ purgeFieldCache(wrapped.getIndexReader()); // our wrapping can create insanity otherwise
}
checkExplanations(q1,s);
@@ -135,6 +133,11 @@ public class QueryUtils {
throw new RuntimeException(e);
}
}
+
+ public static void purgeFieldCache(IndexReader r) throws IOException {
+ // this is just a hack, to get an atomic reader that contains all subreaders for insanity checks
+ FieldCache.DEFAULT.purge(SlowCompositeReaderWrapper.wrap(r));
+ }
/**
* Given an IndexSearcher, returns a new IndexSearcher whose IndexReader
Modified: lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java?rev=1238112&r1=1238111&r2=1238112&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java (original)
+++ lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java Tue Jan 31 00:33:19 2012
@@ -138,4 +138,31 @@ public class TestFieldCacheSanityChecker
cache.purgeAllCaches();
}
+ public void testInsanity2() throws IOException {
+ FieldCache cache = FieldCache.DEFAULT;
+ cache.purgeAllCaches();
+
+ cache.getTerms(readerA, "theString");
+ cache.getTerms(readerB, "theString");
+ cache.getTerms(readerX, "theString");
+
+ cache.getBytes(readerX, "theByte", false);
+
+
+ // // //
+
+ Insanity[] insanity =
+ FieldCacheSanityChecker.checkSanity(cache.getCacheEntries());
+
+ assertEquals("wrong number of cache errors", 1, insanity.length);
+ assertEquals("wrong type of cache error",
+ InsanityType.SUBREADER,
+ insanity[0].getType());
+ assertEquals("wrong number of entries in cache error", 3,
+ insanity[0].getCacheEntries().length);
+
+ // we expect bad things, don't let tearDown complain about them
+ cache.purgeAllCaches();
+ }
+
}
Modified: lucene/dev/trunk/modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java?rev=1238112&r1=1238111&r2=1238112&view=diff
==============================================================================
--- lucene/dev/trunk/modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java (original)
+++ lucene/dev/trunk/modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java Tue Jan 31 00:33:19 2012
@@ -366,7 +366,7 @@ public class AllGroupHeadsCollectorTest
}
}
} finally {
- // TODO: FieldCache.DEFAULT.purge(r);
+ QueryUtils.purgeFieldCache(r);
}
r.close();
Modified: lucene/dev/trunk/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java?rev=1238112&r1=1238111&r2=1238112&view=diff
==============================================================================
--- lucene/dev/trunk/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java (original)
+++ lucene/dev/trunk/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java Tue Jan 31 00:33:19 2012
@@ -1140,9 +1140,9 @@ public class TestGrouping extends Lucene
assertEquals(docIDToIDBlocks, expectedGroups, topGroupsBlockShards, false, false, fillFields, getScores, false);
}
} finally {
- // TODO: FieldCache.DEFAULT.purge(r);
+ QueryUtils.purgeFieldCache(r);
if (rBlocks != null) {
- // TODO: FieldCache.DEFAULT.purge(rBlocks);
+ QueryUtils.purgeFieldCache(rBlocks);
}
}