You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by si...@apache.org on 2012/08/13 15:53:27 UTC

svn commit: r1372423 [37/45] - in /lucene/dev/branches/LUCENE-2878: ./ dev-tools/ dev-tools/eclipse/ dev-tools/idea/.idea/libraries/ dev-tools/maven/ dev-tools/maven/lucene/ dev-tools/maven/lucene/analysis/common/ dev-tools/maven/lucene/analysis/icu/ d...

Modified: lucene/dev/branches/LUCENE-2878/lucene/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/LUCENE-2878/lucene/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java?rev=1372423&r1=1372422&r2=1372423&view=diff
==============================================================================
--- lucene/dev/branches/LUCENE-2878/lucene/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java (original)
+++ lucene/dev/branches/LUCENE-2878/lucene/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java Mon Aug 13 13:52:46 2012
@@ -37,11 +37,13 @@ import org.apache.lucene.index.Directory
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.queryparser.classic.CharStream;
-import org.apache.lucene.queryparser.classic.ParseException;
-import org.apache.lucene.queryparser.classic.QueryParser;
+//import org.apache.lucene.queryparser.classic.CharStream;
+//import org.apache.lucene.queryparser.classic.ParseException;
+//import org.apache.lucene.queryparser.classic.QueryParser;
+//import org.apache.lucene.queryparser.classic.QueryParserBase;
+//import org.apache.lucene.queryparser.classic.QueryParserTokenManager;
 import org.apache.lucene.queryparser.classic.QueryParserBase;
-import org.apache.lucene.queryparser.classic.QueryParserTokenManager;
+import org.apache.lucene.queryparser.flexible.standard.CommonQueryParserConfiguration;
 import org.apache.lucene.search.*;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.store.Directory;
@@ -111,23 +113,17 @@ public abstract class QueryParserTestBas
     }
   }
 
-  public static class QPTestParser extends QueryParser {
-    public QPTestParser(String f, Analyzer a) {
-      super(TEST_VERSION_CURRENT, f, a);
-    }
-
-    @Override
-    protected Query getFuzzyQuery(String field, String termStr, float minSimilarity) throws ParseException {
-      throw new ParseException("Fuzzy queries not allowed");
-    }
-
-    @Override
-    protected Query getWildcardQuery(String field, String termStr) throws ParseException {
-      throw new ParseException("Wildcard queries not allowed");
-    }
+  private int originalMaxClauses;
+  
+  private String defaultField = "field";
+  
+  protected String getDefaultField(){
+    return defaultField;
   }
 
-  private int originalMaxClauses;
+  protected void setDefaultField(String defaultField){
+    this.defaultField = defaultField;
+  }
 
   @Override
   public void setUp() throws Exception {
@@ -135,10 +131,26 @@ public abstract class QueryParserTestBas
     originalMaxClauses = BooleanQuery.getMaxClauseCount();
   }
 
-  public abstract QueryParser getParser(Analyzer a) throws Exception;
+  public abstract CommonQueryParserConfiguration getParserConfig(Analyzer a) throws Exception;
+
+  public abstract void setDefaultOperatorOR(CommonQueryParserConfiguration cqpC);
+
+  public abstract void setDefaultOperatorAND(CommonQueryParserConfiguration cqpC);
+
+  public abstract void setAnalyzeRangeTerms(CommonQueryParserConfiguration cqpC, boolean value);
+
+  public abstract void setAutoGeneratePhraseQueries(CommonQueryParserConfiguration cqpC, boolean value);
 
-  public Query getQuery(String query, Analyzer a) throws Exception {
-    return getParser(a).parse(query);
+  public abstract void setDateResolution(CommonQueryParserConfiguration cqpC, CharSequence field, DateTools.Resolution value);
+
+  public abstract Query getQuery(String query, CommonQueryParserConfiguration cqpC) throws Exception;
+
+  public abstract Query getQuery(String query, Analyzer a) throws Exception;
+  
+  public abstract boolean isQueryParserException(Exception exception);
+
+  public Query getQuery(String query) throws Exception {
+    return getQuery(query, (Analyzer)null);
   }
 
   public void assertQueryEquals(String query, Analyzer a, String result)
@@ -151,9 +163,9 @@ public abstract class QueryParserTestBas
     }
   }
 
-  public void assertQueryEquals(QueryParser qp, String field, String query, String result) 
+  public void assertQueryEquals(CommonQueryParserConfiguration cqpC, String field, String query, String result) 
     throws Exception {
-    Query q = qp.parse(query);
+    Query q = getQuery(query, cqpC);
     String s = q.toString(field);
     if (!s.equals(result)) {
       fail("Query /" + query + "/ yielded /" + s
@@ -172,10 +184,10 @@ public abstract class QueryParserTestBas
 
   public void assertWildcardQueryEquals(String query, boolean lowercase, String result, boolean allowLeadingWildcard)
     throws Exception {
-    QueryParser qp = getParser(null);
-    qp.setLowercaseExpandedTerms(lowercase);
-    qp.setAllowLeadingWildcard(allowLeadingWildcard);
-    Query q = qp.parse(query);
+    CommonQueryParserConfiguration cqpC = getParserConfig(null);
+    cqpC.setLowercaseExpandedTerms(lowercase);
+    cqpC.setAllowLeadingWildcard(allowLeadingWildcard);
+    Query q = getQuery(query, cqpC);
     String s = q.toString("field");
     if (!s.equals(result)) {
       fail("WildcardQuery /" + query + "/ yielded /" + s
@@ -189,8 +201,7 @@ public abstract class QueryParserTestBas
   }
 
   public void assertWildcardQueryEquals(String query, String result) throws Exception {
-    QueryParser qp = getParser(null);
-    Query q = qp.parse(query);
+    Query q = getQuery(query);
     String s = q.toString("field");
     if (!s.equals(result)) {
       fail("WildcardQuery /" + query + "/ yielded /" + s + "/, expecting /"
@@ -202,9 +213,9 @@ public abstract class QueryParserTestBas
     throws Exception {
     if (a == null)
       a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true);
-    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", a);
-    qp.setDefaultOperator(QueryParserBase.AND_OPERATOR);
-    return qp.parse(query);
+    CommonQueryParserConfiguration qp = getParserConfig(a);
+    setDefaultOperatorAND(qp);
+    return getQuery(query, qp);
   }
 
   public void assertQueryEqualsDOA(String query, Analyzer a, String result)
@@ -315,9 +326,9 @@ public abstract class QueryParserTestBas
     PhraseQuery expected = new PhraseQuery();
     expected.add(new Term("field", "中"));
     expected.add(new Term("field", "国"));
-    QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "field", analyzer);
-    parser.setAutoGeneratePhraseQueries(true);
-    assertEquals(expected, parser.parse("中国"));
+    CommonQueryParserConfiguration qp = getParserConfig(analyzer);
+    setAutoGeneratePhraseQueries(qp, true);
+    assertEquals(expected, getQuery("中国",qp));
   }
 
   public void testSimple() throws Exception {
@@ -345,26 +356,15 @@ public abstract class QueryParserTestBas
 //    assertQueryEquals("a OR ! b", null, "a -b");
     assertQueryEquals("a OR -b", null, "a -b");
 
-    // +,-,! should be directly adjacent to operand (i.e. not separated by whitespace) to be treated as an operator
-    Analyzer a = new Analyzer() {
-      @Override
-      public TokenStreamComponents createComponents(String fieldName, Reader reader) {
-        return new TokenStreamComponents(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
-      }
-    };
-    assertQueryEquals("a - b", a, "a - b");
-    assertQueryEquals("a + b", a, "a + b");
-    assertQueryEquals("a ! b", a, "a ! b");
-
     assertQueryEquals("+term -term term", null, "+term -term term");
     assertQueryEquals("foo:term AND field:anotherTerm", null,
                       "+foo:term +anotherterm");
     assertQueryEquals("term AND \"phrase phrase\"", null,
                       "+term +\"phrase phrase\"");
     assertQueryEquals("\"hello there\"", null, "\"hello there\"");
-    assertTrue(getQuery("a AND b", null) instanceof BooleanQuery);
-    assertTrue(getQuery("hello", null) instanceof TermQuery);
-    assertTrue(getQuery("\"hello there\"", null) instanceof PhraseQuery);
+    assertTrue(getQuery("a AND b") instanceof BooleanQuery);
+    assertTrue(getQuery("hello") instanceof TermQuery);
+    assertTrue(getQuery("\"hello there\"") instanceof PhraseQuery);
 
     assertQueryEquals("germ term^2.0", null, "germ term^2.0");
     assertQueryEquals("(term)^2.0", null, "term^2.0");
@@ -383,15 +383,24 @@ public abstract class QueryParserTestBas
     assertQueryEquals("+title:(dog OR cat) -author:\"bob dole\"", null,
                       "+(title:dog title:cat) -author:\"bob dole\"");
     
-    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random()));
-    // make sure OR is the default:
-    assertEquals(QueryParserBase.OR_OPERATOR, qp.getDefaultOperator());
-    qp.setDefaultOperator(QueryParserBase.AND_OPERATOR);
-    assertEquals(QueryParserBase.AND_OPERATOR, qp.getDefaultOperator());
-    qp.setDefaultOperator(QueryParserBase.OR_OPERATOR);
-    assertEquals(QueryParserBase.OR_OPERATOR, qp.getDefaultOperator());
   }
 
+  public abstract void testDefaultOperator() throws Exception;
+  
+  
+  public void testOperatorVsWhitespace() throws Exception { //LUCENE-2566
+    // +,-,! should be directly adjacent to operand (i.e. not separated by whitespace) to be treated as an operator
+    Analyzer a = new Analyzer() {
+      @Override
+      public TokenStreamComponents createComponents(String fieldName, Reader reader) {
+        return new TokenStreamComponents(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
+      }
+    };
+    assertQueryEquals("a - b", a, "a - b");
+    assertQueryEquals("a + b", a, "a + b");
+    assertQueryEquals("a ! b", a, "a ! b");  
+  }
+  
   public void testPunct() throws Exception {
     Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
     assertQueryEquals("a&b", a, "a&b");
@@ -430,20 +439,20 @@ public abstract class QueryParserTestBas
     assertQueryEquals("term*germ", null, "term*germ");
     assertQueryEquals("term*germ^3", null, "term*germ^3.0");
 
-    assertTrue(getQuery("term*", null) instanceof PrefixQuery);
-    assertTrue(getQuery("term*^2", null) instanceof PrefixQuery);
-    assertTrue(getQuery("term~", null) instanceof FuzzyQuery);
-    assertTrue(getQuery("term~0.7", null) instanceof FuzzyQuery);
-    FuzzyQuery fq = (FuzzyQuery)getQuery("term~0.7", null);
+    assertTrue(getQuery("term*") instanceof PrefixQuery);
+    assertTrue(getQuery("term*^2") instanceof PrefixQuery);
+    assertTrue(getQuery("term~") instanceof FuzzyQuery);
+    assertTrue(getQuery("term~0.7") instanceof FuzzyQuery);
+    FuzzyQuery fq = (FuzzyQuery)getQuery("term~0.7");
     assertEquals(1, fq.getMaxEdits());
     assertEquals(FuzzyQuery.defaultPrefixLength, fq.getPrefixLength());
-    fq = (FuzzyQuery)getQuery("term~", null);
+    fq = (FuzzyQuery)getQuery("term~");
     assertEquals(2, fq.getMaxEdits());
     assertEquals(FuzzyQuery.defaultPrefixLength, fq.getPrefixLength());
     
     assertParseException("term~1.1"); // value > 1, throws exception
 
-    assertTrue(getQuery("term*germ", null) instanceof WildcardQuery);
+    assertTrue(getQuery("term*germ") instanceof WildcardQuery);
 
 /* Tests to see that wild card terms are (or are not) properly
   * lower-cased with proper parser configuration
@@ -483,15 +492,20 @@ public abstract class QueryParserTestBas
     // Test suffix queries: first disallow
     try {
       assertWildcardQueryEquals("*Term", true, "*term");
-      fail();
-    } catch(ParseException pe) {
+    } catch(Exception pe) {
       // expected exception
+      if(!isQueryParserException(pe)){
+        fail();
+      }
     }
     try {
       assertWildcardQueryEquals("?Term", true, "?term");
       fail();
-    } catch(ParseException pe) {
+    } catch(Exception pe) {
       // expected exception
+      if(!isQueryParserException(pe)){
+        fail();
+      }
     }
     // Test suffix queries: then allow
     assertWildcardQueryEquals("*Term", true, "*term", true);
@@ -499,11 +513,11 @@ public abstract class QueryParserTestBas
   }
   
   public void testLeadingWildcardType() throws Exception {
-    QueryParser qp = getParser(null);
-    qp.setAllowLeadingWildcard(true);
-    assertEquals(WildcardQuery.class, qp.parse("t*erm*").getClass());
-    assertEquals(WildcardQuery.class, qp.parse("?term*").getClass());
-    assertEquals(WildcardQuery.class, qp.parse("*term*").getClass());
+    CommonQueryParserConfiguration cqpC = getParserConfig(null);
+    cqpC.setAllowLeadingWildcard(true);
+    assertEquals(WildcardQuery.class, getQuery("t*erm*",cqpC).getClass());
+    assertEquals(WildcardQuery.class, getQuery("?term*",cqpC).getClass());
+    assertEquals(WildcardQuery.class, getQuery("*term*",cqpC).getClass());
   }
 
   public void testQPA() throws Exception {
@@ -540,11 +554,12 @@ public abstract class QueryParserTestBas
     assertQueryEquals("[ a TO z}", null, "[a TO z}");
     assertQueryEquals("{ a TO z]", null, "{a TO z]"); 
 
-     assertEquals(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT, ((TermRangeQuery)getQuery("[ a TO z]", null)).getRewriteMethod());
+     assertEquals(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT, ((TermRangeQuery)getQuery("[ a TO z]")).getRewriteMethod());
 
-    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
+    CommonQueryParserConfiguration qp = getParserConfig( new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
+    
     qp.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
-    assertEquals(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE,((TermRangeQuery)qp.parse("[ a TO z]")).getRewriteMethod());
+    assertEquals(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE,((TermRangeQuery)getQuery("[ a TO z]", qp)).getRewriteMethod());
     
     // test open ranges
     assertQueryEquals("[ a TO * ]", null, "[a TO *]");
@@ -569,9 +584,13 @@ public abstract class QueryParserTestBas
     assertQueryEquals("[* TO Z]",null,"[* TO z]");
     assertQueryEquals("[A TO *]",null,"[a TO *]");
     assertQueryEquals("[* TO *]",null,"[* TO *]");
-    assertQueryEquals("[\\* TO \"*\"]",null,"[\\* TO \\*]");
  }
-    
+
+  public void testRangeWithPhrase() throws Exception {
+    assertQueryEquals("[\\* TO \"*\"]",null,"[\\* TO \\*]");
+    assertQueryEquals("[\"*\" TO *]",null,"[\\* TO *]");
+  }
+
   private String escapeDateString(String s) {
     if (s.indexOf(" ") > -1) {
       return "\"" + s + "\"";
@@ -616,16 +635,17 @@ public abstract class QueryParserTestBas
     final String defaultField = "default";
     final String monthField = "month";
     final String hourField = "hour";
-    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
+    Analyzer a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true);
+    CommonQueryParserConfiguration qp = getParserConfig(a);
     
     // set a field specific date resolution
-    qp.setDateResolution(monthField, DateTools.Resolution.MONTH);
+    setDateResolution(qp, monthField, DateTools.Resolution.MONTH);
     
     // set default date resolution to MILLISECOND
     qp.setDateResolution(DateTools.Resolution.MILLISECOND);
     
     // set second field specific date resolution    
-    qp.setDateResolution(hourField, DateTools.Resolution.HOUR);
+    setDateResolution(qp, hourField, DateTools.Resolution.HOUR);
 
     // for this field no field specific date resolution has been set,
     // so verify if the default resolution is used
@@ -640,11 +660,11 @@ public abstract class QueryParserTestBas
             endDateExpected.getTime(), DateTools.Resolution.HOUR);  
   }
   
-  public void assertDateRangeQueryEquals(QueryParser qp, String field, String startDate, String endDate, 
+  public void assertDateRangeQueryEquals(CommonQueryParserConfiguration cqpC, String field, String startDate, String endDate, 
                                          Date endDateInclusive, DateTools.Resolution resolution) throws Exception {
-    assertQueryEquals(qp, field, field + ":[" + escapeDateString(startDate) + " TO " + escapeDateString(endDate) + "]",
+    assertQueryEquals(cqpC, field, field + ":[" + escapeDateString(startDate) + " TO " + escapeDateString(endDate) + "]",
                "[" + getDate(startDate, resolution) + " TO " + getDate(endDateInclusive, resolution) + "]");
-    assertQueryEquals(qp, field, field + ":{" + escapeDateString(startDate) + " TO " + escapeDateString(endDate) + "}",
+    assertQueryEquals(cqpC, field, field + ":{" + escapeDateString(startDate) + " TO " + escapeDateString(endDate) + "}",
                "{" + getDate(startDate, resolution) + " TO " + getDate(endDate, resolution) + "}");
   }
 
@@ -693,12 +713,6 @@ public abstract class QueryParserTestBas
 
     assertQueryEquals("a:b\\\\c*", a, "a:b\\c*");
 
-    assertQueryEquals("a:b\\-?c", a, "a:b\\-?c");
-    assertQueryEquals("a:b\\+?c", a, "a:b\\+?c");
-    assertQueryEquals("a:b\\:?c", a, "a:b\\:?c");
-
-    assertQueryEquals("a:b\\\\?c", a, "a:b\\\\?c");
-
     assertQueryEquals("a:b\\-c~", a, "a:b-c~2");
     assertQueryEquals("a:b\\+c~", a, "a:b+c~2");
     assertQueryEquals("a:b\\:c~", a, "a:b:c~2");
@@ -741,7 +755,16 @@ public abstract class QueryParserTestBas
     // LUCENE-1189
     assertQueryEquals("(\"a\\\\\") or (\"b\")", a ,"a\\ or b");
   }
+  
+  public void testEscapedVsQuestionMarkAsWildcard() throws Exception {
+    Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
+    assertQueryEquals("a:b\\-?c", a, "a:b\\-?c");
+    assertQueryEquals("a:b\\+?c", a, "a:b\\+?c");
+    assertQueryEquals("a:b\\:?c", a, "a:b\\:?c");
 
+    assertQueryEquals("a:b\\\\?c", a, "a:b\\\\?c");
+  }
+  
   public void testQueryStringEscaping() throws Exception {
     Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
 
@@ -830,20 +853,21 @@ public abstract class QueryParserTestBas
     throws Exception {
     CharacterRunAutomaton stopWords = new CharacterRunAutomaton(BasicAutomata.makeString("on"));
     Analyzer oneStopAnalyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, stopWords, true);
-    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", oneStopAnalyzer);
-    Query q = qp.parse("on^1.0");
+    CommonQueryParserConfiguration qp = getParserConfig(oneStopAnalyzer);
+    Query q = getQuery("on^1.0",qp);
     assertNotNull(q);
-    q = qp.parse("\"hello\"^2.0");
+    q = getQuery("\"hello\"^2.0",qp);
     assertNotNull(q);
     assertEquals(q.getBoost(), (float) 2.0, (float) 0.5);
-    q = qp.parse("hello^2.0");
+    q = getQuery("hello^2.0",qp);
     assertNotNull(q);
     assertEquals(q.getBoost(), (float) 2.0, (float) 0.5);
-    q = qp.parse("\"on\"^1.0");
+    q = getQuery("\"on\"^1.0",qp);
     assertNotNull(q);
 
-    QueryParser qp2 = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true));
-    q = qp2.parse("the^3");
+    Analyzer a2 = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true); 
+    CommonQueryParserConfiguration qp2 = getParserConfig(a2);
+    q = getQuery("the^3", qp2);
     // "the" is a stop word so the result is an empty query:
     assertNotNull(q);
     assertEquals("", q.toString());
@@ -852,13 +876,26 @@ public abstract class QueryParserTestBas
 
   public void assertParseException(String queryString) throws Exception {
     try {
-      getQuery(queryString, null);
-    } catch (ParseException expected) {
-      return;
+      getQuery(queryString);
+    } catch (Exception expected) {
+      if(isQueryParserException(expected)){
+        return;
+      }
+    }
+    fail("ParseException expected, not thrown");
+  }
+
+  public void assertParseException(String queryString, Analyzer a) throws Exception {
+    try {
+      getQuery(queryString, a);
+    } catch (Exception expected) {
+      if(isQueryParserException(expected)){
+        return;
+      }
     }
     fail("ParseException expected, not thrown");
   }
-       
+
   public void testException() throws Exception {
     assertParseException("\"some phrase");
     assertParseException("(foo bar");
@@ -867,44 +904,20 @@ public abstract class QueryParserTestBas
     assertParseException("(sub query)^5.0^2.0 plus more");
     assertParseException("secret AND illegal) AND access:confidential");
   }
-  
-
-  public void testCustomQueryParserWildcard() {
-    try {
-      new QPTestParser("contents", new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)).parse("a?t");
-      fail("Wildcard queries should not be allowed");
-    } catch (ParseException expected) {
-      // expected exception
-    }
-  }
-
-  public void testCustomQueryParserFuzzy() throws Exception {
-    try {
-      new QPTestParser("contents", new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)).parse("xunit~");
-      fail("Fuzzy queries should not be allowed");
-    } catch (ParseException expected) {
-      // expected exception
-    }
-  }
 
   public void testBooleanQuery() throws Exception {
     BooleanQuery.setMaxClauseCount(2);
-    try {
-      QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false));
-      qp.parse("one two three");
-      fail("ParseException expected due to too many boolean clauses");
-    } catch (ParseException expected) {
-      // too many boolean clauses, so ParseException is expected
-    }
+    Analyzer purWhitespaceAnalyzer = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
+    assertParseException("one two three", purWhitespaceAnalyzer);
   }
 
   /**
    * This test differs from TestPrecedenceQueryParser
    */
   public void testPrecedence() throws Exception {
-    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false));
-    Query query1 = qp.parse("A AND B OR C AND D");
-    Query query2 = qp.parse("+A +B +C +D");
+    CommonQueryParserConfiguration qp = getParserConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false));
+    Query query1 = getQuery("A AND B OR C AND D", qp);
+    Query query2 = getQuery("+A +B +C +D", qp);
     assertEquals(query1, query2);
   }
 
@@ -936,131 +949,73 @@ public abstract class QueryParserTestBas
 //    iw.addDocument(d);
 //  }
 
-  public void testStarParsing() throws Exception {
-    final int[] type = new int[1];
-    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)) {
-      @Override
-      protected Query getWildcardQuery(String field, String termStr) {
-        // override error checking of superclass
-        type[0]=1;
-        return new TermQuery(new Term(field,termStr));
-      }
-      @Override
-      protected Query getPrefixQuery(String field, String termStr) {
-        // override error checking of superclass
-        type[0]=2;        
-        return new TermQuery(new Term(field,termStr));
-      }
-
-      @Override
-      protected Query getFieldQuery(String field, String queryText, boolean quoted) throws ParseException {
-        type[0]=3;
-        return super.getFieldQuery(field, queryText, quoted);
-      }
-    };
-
-    TermQuery tq;
-
-    tq = (TermQuery)qp.parse("foo:zoo*");
-    assertEquals("zoo",tq.getTerm().text());
-    assertEquals(2,type[0]);
-
-    tq = (TermQuery)qp.parse("foo:zoo*^2");
-    assertEquals("zoo",tq.getTerm().text());
-    assertEquals(2,type[0]);
-    assertEquals(tq.getBoost(),2,0);
-
-    tq = (TermQuery)qp.parse("foo:*");
-    assertEquals("*",tq.getTerm().text());
-    assertEquals(1,type[0]);  // could be a valid prefix query in the future too
-
-    tq = (TermQuery)qp.parse("foo:*^2");
-    assertEquals("*",tq.getTerm().text());
-    assertEquals(1,type[0]);
-    assertEquals(tq.getBoost(),2,0);    
-
-    tq = (TermQuery)qp.parse("*:foo");
-    assertEquals("*",tq.getTerm().field());
-    assertEquals("foo",tq.getTerm().text());
-    assertEquals(3,type[0]);
-
-    tq = (TermQuery)qp.parse("*:*");
-    assertEquals("*",tq.getTerm().field());
-    assertEquals("*",tq.getTerm().text());
-    assertEquals(1,type[0]);  // could be handled as a prefix query in the future
-
-     tq = (TermQuery)qp.parse("(*:*)");
-    assertEquals("*",tq.getTerm().field());
-    assertEquals("*",tq.getTerm().text());
-    assertEquals(1,type[0]);
-
-  }
+  public abstract void testStarParsing() throws Exception;
 
   public void testEscapedWildcard() throws Exception {
-    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false));
+    CommonQueryParserConfiguration qp = getParserConfig( new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false));
     WildcardQuery q = new WildcardQuery(new Term("field", "foo\\?ba?r"));
-    assertEquals(q, qp.parse("foo\\?ba?r"));
+    assertEquals(q, getQuery("foo\\?ba?r", qp));
   }
   
   public void testRegexps() throws Exception {
-    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false));
+    CommonQueryParserConfiguration qp = getParserConfig( new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false));
     RegexpQuery q = new RegexpQuery(new Term("field", "[a-z][123]"));
-    assertEquals(q, qp.parse("/[a-z][123]/"));
+    assertEquals(q, getQuery("/[a-z][123]/",qp));
     qp.setLowercaseExpandedTerms(true);
-    assertEquals(q, qp.parse("/[A-Z][123]/"));
+    assertEquals(q, getQuery("/[A-Z][123]/",qp));
     q.setBoost(0.5f);
-    assertEquals(q, qp.parse("/[A-Z][123]/^0.5"));
+    assertEquals(q, getQuery("/[A-Z][123]/^0.5",qp));
     qp.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
     q.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
-    assertTrue(qp.parse("/[A-Z][123]/^0.5") instanceof RegexpQuery);
-    assertEquals(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE, ((RegexpQuery)qp.parse("/[A-Z][123]/^0.5")).getRewriteMethod());
-    assertEquals(q, qp.parse("/[A-Z][123]/^0.5"));
+    assertTrue(getQuery("/[A-Z][123]/^0.5",qp) instanceof RegexpQuery);
+    assertEquals(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE, ((RegexpQuery)getQuery("/[A-Z][123]/^0.5",qp)).getRewriteMethod());
+    assertEquals(q, getQuery("/[A-Z][123]/^0.5",qp));
     qp.setMultiTermRewriteMethod(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT);
     
     Query escaped = new RegexpQuery(new Term("field", "[a-z]\\/[123]"));
-    assertEquals(escaped, qp.parse("/[a-z]\\/[123]/"));
+    assertEquals(escaped, getQuery("/[a-z]\\/[123]/",qp));
     Query escaped2 = new RegexpQuery(new Term("field", "[a-z]\\*[123]"));
-    assertEquals(escaped2, qp.parse("/[a-z]\\*[123]/"));
+    assertEquals(escaped2, getQuery("/[a-z]\\*[123]/",qp));
     
     BooleanQuery complex = new BooleanQuery();
     complex.add(new RegexpQuery(new Term("field", "[a-z]\\/[123]")), Occur.MUST);
     complex.add(new TermQuery(new Term("path", "/etc/init.d/")), Occur.MUST);
     complex.add(new TermQuery(new Term("field", "/etc/init[.]d/lucene/")), Occur.SHOULD);
-    assertEquals(complex, qp.parse("/[a-z]\\/[123]/ AND path:\"/etc/init.d/\" OR \"/etc\\/init\\[.\\]d/lucene/\" "));
+    assertEquals(complex, getQuery("/[a-z]\\/[123]/ AND path:\"/etc/init.d/\" OR \"/etc\\/init\\[.\\]d/lucene/\" ",qp));
     
     Query re = new RegexpQuery(new Term("field", "http.*"));
-    assertEquals(re, qp.parse("field:/http.*/"));
-    assertEquals(re, qp.parse("/http.*/"));
+    assertEquals(re, getQuery("field:/http.*/",qp));
+    assertEquals(re, getQuery("/http.*/",qp));
     
     re = new RegexpQuery(new Term("field", "http~0.5"));
-    assertEquals(re, qp.parse("field:/http~0.5/"));
-    assertEquals(re, qp.parse("/http~0.5/"));
+    assertEquals(re, getQuery("field:/http~0.5/",qp));
+    assertEquals(re, getQuery("/http~0.5/",qp));
     
     re = new RegexpQuery(new Term("field", "boo"));
-    assertEquals(re, qp.parse("field:/boo/"));
-    assertEquals(re, qp.parse("/boo/"));
+    assertEquals(re, getQuery("field:/boo/",qp));
+    assertEquals(re, getQuery("/boo/",qp));
     
-    assertEquals(new TermQuery(new Term("field", "/boo/")), qp.parse("\"/boo/\""));
-    assertEquals(new TermQuery(new Term("field", "/boo/")), qp.parse("\\/boo\\/"));
+    assertEquals(new TermQuery(new Term("field", "/boo/")), getQuery("\"/boo/\"",qp));
+    assertEquals(new TermQuery(new Term("field", "/boo/")), getQuery("\\/boo\\/",qp));
     
     BooleanQuery two = new BooleanQuery();
     two.add(new RegexpQuery(new Term("field", "foo")), Occur.SHOULD);
     two.add(new RegexpQuery(new Term("field", "bar")), Occur.SHOULD);
-    assertEquals(two, qp.parse("field:/foo/ field:/bar/"));
-    assertEquals(two, qp.parse("/foo/ /bar/"));
+    assertEquals(two, getQuery("field:/foo/ field:/bar/",qp));
+    assertEquals(two, getQuery("/foo/ /bar/",qp));
   }
   
   public void testStopwords() throws Exception {
     CharacterRunAutomaton stopSet = new CharacterRunAutomaton(new RegExp("the|foo").toAutomaton());
-    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "a", new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, stopSet, true));
-    Query result = qp.parse("a:the OR a:foo");
+    CommonQueryParserConfiguration qp = getParserConfig(new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, stopSet, true));
+    Query result = getQuery("field:the OR field:foo",qp);
     assertNotNull("result is null and it shouldn't be", result);
     assertTrue("result is not a BooleanQuery", result instanceof BooleanQuery);
     assertTrue(((BooleanQuery) result).clauses().size() + " does not equal: " + 0, ((BooleanQuery) result).clauses().size() == 0);
-    result = qp.parse("a:woo OR a:the");
+    result = getQuery("field:woo OR field:the",qp);
     assertNotNull("result is null and it shouldn't be", result);
     assertTrue("result is not a TermQuery", result instanceof TermQuery);
-    result = qp.parse("(fieldX:xxxxx OR fieldy:xxxxxxxx)^2 AND (fieldx:the OR fieldy:foo)");
+    result = getQuery("(fieldX:xxxxx OR fieldy:xxxxxxxx)^2 AND (fieldx:the OR fieldy:foo)",qp);
     assertNotNull("result is null and it shouldn't be", result);
     assertTrue("result is not a BooleanQuery", result instanceof BooleanQuery);
     if (VERBOSE) System.out.println("Result: " + result);
@@ -1068,12 +1023,12 @@ public abstract class QueryParserTestBas
   }
 
   public void testPositionIncrement() throws Exception {
-    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "a", new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true));
+    CommonQueryParserConfiguration qp = getParserConfig( new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true));
     qp.setEnablePositionIncrements(true);
     String qtxt = "\"the words in poisitions pos02578 are stopped in this phrasequery\"";
     //               0         2                      5           7  8
     int expectedPositions[] = {1,3,4,6,9};
-    PhraseQuery pq = (PhraseQuery) qp.parse(qtxt);
+    PhraseQuery pq = (PhraseQuery) getQuery(qtxt,qp);
     //System.out.println("Query text: "+qtxt);
     //System.out.println("Result: "+pq);
     Term t[] = pq.getTerms();
@@ -1085,20 +1040,23 @@ public abstract class QueryParserTestBas
   }
 
   public void testMatchAllDocs() throws Exception {
-    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false));
-    assertEquals(new MatchAllDocsQuery(), qp.parse("*:*"));
-    assertEquals(new MatchAllDocsQuery(), qp.parse("(*:*)"));
-    BooleanQuery bq = (BooleanQuery)qp.parse("+*:* -*:*");
+    CommonQueryParserConfiguration qp = getParserConfig( new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false));
+    assertEquals(new MatchAllDocsQuery(), getQuery("*:*",qp));
+    assertEquals(new MatchAllDocsQuery(), getQuery("(*:*)",qp));
+    BooleanQuery bq = (BooleanQuery)getQuery("+*:* -*:*",qp);
     assertTrue(bq.getClauses()[0].getQuery() instanceof MatchAllDocsQuery);
     assertTrue(bq.getClauses()[1].getQuery() instanceof MatchAllDocsQuery);
   }
   
-  private void assertHits(int expected, String query, IndexSearcher is) throws ParseException, IOException {
-    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "date", new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false));
+  private void assertHits(int expected, String query, IndexSearcher is) throws Exception {
+    String oldDefaultField = getDefaultField();
+    setDefaultField("date");
+    CommonQueryParserConfiguration qp = getParserConfig( new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false));
     qp.setLocale(Locale.ENGLISH);
-    Query q = qp.parse(query);
+    Query q = getQuery(query,qp);
     ScoreDoc[] hits = is.search(q, null, 1000).scoreDocs;
     assertEquals(expected, hits.length);
+    setDefaultField( oldDefaultField );
   }
 
   @Override
@@ -1115,43 +1073,18 @@ public abstract class QueryParserTestBas
     Analyzer a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, a));
     Document doc = new Document();
-    doc.add(newTextField("f", "the wizard of ozzy", Field.Store.NO));
+    doc.add(newTextField("field", "the wizard of ozzy", Field.Store.NO));
     w.addDocument(doc);
     IndexReader r = DirectoryReader.open(w, true);
     w.close();
     IndexSearcher s = newSearcher(r);
-    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "f", a);
-    Query q = qp.parse("\"wizard of ozzy\"");
+    
+    Query q = getQuery("\"wizard of ozzy\"",a);
     assertEquals(1, s.search(q, 1).totalHits);
     r.close();
     dir.close();
   }
 
-  // LUCENE-2002: when we run javacc to regen QueryParser,
-  // we also run a replaceregexp step to fix 2 of the public
-  // ctors (change them to protected):
-  //
-  //   protected QueryParser(CharStream stream)
-  //
-  //   protected QueryParser(QueryParserTokenManager tm)
-  //
-  // This test is here as a safety, in case that ant step
-  // doesn't work for some reason.
-  public void testProtectedCtors() throws Exception {
-    try {
-      QueryParser.class.getConstructor(new Class[] {CharStream.class});
-      fail("please switch public QueryParser(CharStream) to be protected");
-    } catch (NoSuchMethodException nsme) {
-      // expected
-    }
-    try {
-      QueryParser.class.getConstructor(new Class[] {QueryParserTokenManager.class});
-      fail("please switch public QueryParser(QueryParserTokenManager) to be protected");
-    } catch (NoSuchMethodException nsme) {
-      // expected
-    }
-  }
-  
   /**
    * adds synonym of "dog" for "dogs".
    */
@@ -1184,7 +1117,10 @@ public abstract class QueryParserTestBas
   }
   
   /** whitespace+lowercase analyzer with synonyms */
-  private class Analyzer1 extends Analyzer {
+  protected class Analyzer1 extends Analyzer {
+    public Analyzer1(){
+      super();
+    }
     @Override
     public TokenStreamComponents createComponents(String fieldName, Reader reader) {
       Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, true);
@@ -1193,48 +1129,17 @@ public abstract class QueryParserTestBas
   }
   
   /** whitespace+lowercase analyzer without synonyms */
-  private class Analyzer2 extends Analyzer {
+  protected class Analyzer2 extends Analyzer {
+    public Analyzer2(){
+      super();
+    }
     @Override
     public TokenStreamComponents createComponents(String fieldName, Reader reader) {
       return new TokenStreamComponents(new MockTokenizer(reader, MockTokenizer.WHITESPACE, true));
     }
   }
   
-  /** query parser that doesn't expand synonyms when users use double quotes */
-  private class SmartQueryParser extends QueryParser {
-    Analyzer morePrecise = new Analyzer2();
-    
-    public SmartQueryParser() {
-      super(TEST_VERSION_CURRENT, "field", new Analyzer1());
-    }
-
-    @Override
-    protected Query getFieldQuery(String field, String queryText, boolean quoted)
-        throws ParseException {
-      if (quoted)
-        return newFieldQuery(morePrecise, field, queryText, quoted);
-      else
-        return super.getFieldQuery(field, queryText, quoted);
-    }
-  }
-  
-  public void testNewFieldQuery() throws Exception {
-    /** ordinary behavior, synonyms form uncoordinated boolean query */
-    QueryParser dumb = new QueryParser(TEST_VERSION_CURRENT, "field", new Analyzer1());
-    BooleanQuery expanded = new BooleanQuery(true);
-    expanded.add(new TermQuery(new Term("field", "dogs")), BooleanClause.Occur.SHOULD);
-    expanded.add(new TermQuery(new Term("field", "dog")), BooleanClause.Occur.SHOULD);
-    assertEquals(expanded, dumb.parse("\"dogs\""));
-    /** even with the phrase operator the behavior is the same */
-    assertEquals(expanded, dumb.parse("dogs"));
-    
-    /** custom behavior, the synonyms are expanded, unless you use quote operator */
-    QueryParser smart = new SmartQueryParser();
-    assertEquals(expanded, smart.parse("dogs"));
-    
-    Query unexpanded = new TermQuery(new Term("field", "dogs"));
-    assertEquals(unexpanded, smart.parse("\"dogs\""));
-  }
+  public abstract void testNewFieldQuery() throws Exception;
   
   /**
    * Mock collation analyzer: indexes terms as "collated" + term
@@ -1267,30 +1172,31 @@ public abstract class QueryParserTestBas
   }
   
   public void testCollatedRange() throws Exception {
-    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockCollationAnalyzer());
-    qp.setAnalyzeRangeTerms(true);
-    Query expected = TermRangeQuery.newStringRange("field", "collatedabc", "collateddef", true, true);
-    Query actual = qp.parse("[abc TO def]");
+    CommonQueryParserConfiguration qp = getParserConfig(new MockCollationAnalyzer());
+    setAnalyzeRangeTerms(qp, true);
+    Query expected = TermRangeQuery.newStringRange(getDefaultField(), "collatedabc", "collateddef", true, true);
+    Query actual = getQuery("[abc TO def]", qp);
     assertEquals(expected, actual);
   }
 
   public void testDistanceAsEditsParsing() throws Exception {
-    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random()));
-    FuzzyQuery q = (FuzzyQuery) qp.parse("foobar~2");
+    FuzzyQuery q = (FuzzyQuery) getQuery("foobar~2",new MockAnalyzer(random()));
     assertEquals(2, q.getMaxEdits());
   }
 
-  public void testPhraseQueryToString() throws ParseException {
+  public void testPhraseQueryToString() throws Exception {
     Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
-    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", analyzer);
+    CommonQueryParserConfiguration qp = getParserConfig(analyzer);
     qp.setEnablePositionIncrements(true);
-    PhraseQuery q = (PhraseQuery)qp.parse("\"this hi this is a test is\"");
+    PhraseQuery q = (PhraseQuery)getQuery("\"this hi this is a test is\"", qp);
     assertEquals("field:\"? hi ? ? ? test\"", q.toString());
   }
 
-  public void testParseWildcardAndPhraseQueries() throws ParseException {
+  public void testParseWildcardAndPhraseQueries() throws Exception {
     String field = "content";
-    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, field, new MockAnalyzer(random()));
+    String oldDefaultField = getDefaultField();
+    setDefaultField(field);
+    CommonQueryParserConfiguration qp = getParserConfig(new MockAnalyzer(random()));
     qp.setAllowLeadingWildcard(true);
 
     String prefixQueries[][] = {
@@ -1309,7 +1215,7 @@ public abstract class QueryParserTestBas
     for (int i = 0; i < prefixQueries.length; i++) {
       for (int j = 0; j < prefixQueries[i].length; j++) {
         String queryString = prefixQueries[i][j];
-        Query q = qp.parse(queryString);
+        Query q = getQuery(queryString,qp);
         assertEquals(PrefixQuery.class, q.getClass());
       }
     }
@@ -1318,51 +1224,64 @@ public abstract class QueryParserTestBas
     for (int i = 0; i < wildcardQueries.length; i++) {
       for (int j = 0; j < wildcardQueries[i].length; j++) {
         String qtxt = wildcardQueries[i][j];
-        Query q = qp.parse(qtxt);
+        Query q = getQuery(qtxt,qp);
         assertEquals(WildcardQuery.class, q.getClass());
       }
     }
+    setDefaultField(oldDefaultField);
   }
 
   public void testPhraseQueryPositionIncrements() throws Exception {
     CharacterRunAutomaton stopStopList =
     new CharacterRunAutomaton(new RegExp("[sS][tT][oO][pP]").toAutomaton());
 
-    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field",
-        new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false, stopStopList, false));
+    CommonQueryParserConfiguration qp = getParserConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false, stopStopList, false));
 
     PhraseQuery phraseQuery = new PhraseQuery();
     phraseQuery.add(new Term("field", "1"));
     phraseQuery.add(new Term("field", "2"));
 
-    assertEquals(phraseQuery, qp.parse("\"1 2\""));
-    assertEquals(phraseQuery, qp.parse("\"1 stop 2\""));
+    assertEquals(phraseQuery, getQuery("\"1 2\"",qp));
+    assertEquals(phraseQuery, getQuery("\"1 stop 2\"",qp));
 
     qp.setEnablePositionIncrements(true);
-    assertEquals(phraseQuery, qp.parse("\"1 stop 2\""));
+    assertEquals(phraseQuery, getQuery("\"1 stop 2\"",qp));
 
     qp.setEnablePositionIncrements(false);
-    assertEquals(phraseQuery, qp.parse("\"1 stop 2\""));
+    assertEquals(phraseQuery, getQuery("\"1 stop 2\"",qp));
 
-    qp = new QueryParser(TEST_VERSION_CURRENT, "field",
+    qp = getParserConfig(
                          new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false, stopStopList, true));
     qp.setEnablePositionIncrements(true);
 
     phraseQuery = new PhraseQuery();
     phraseQuery.add(new Term("field", "1"));
     phraseQuery.add(new Term("field", "2"), 2);
-    assertEquals(phraseQuery, qp.parse("\"1 stop 2\""));
+    assertEquals(phraseQuery, getQuery("\"1 stop 2\"",qp));
   }
 
   public void testMatchAllQueryParsing() throws Exception {
     // test simple parsing of MatchAllDocsQuery
-    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "key", new MockAnalyzer(random()));
-    assertEquals(new MatchAllDocsQuery(), qp.parse(new MatchAllDocsQuery().toString()));
+    String oldDefaultField = getDefaultField();
+    setDefaultField("key");
+    CommonQueryParserConfiguration qp = getParserConfig( new MockAnalyzer(random()));
+    assertEquals(new MatchAllDocsQuery(), getQuery(new MatchAllDocsQuery().toString(),qp));
 
     // test parsing with non-default boost
     MatchAllDocsQuery query = new MatchAllDocsQuery();
     query.setBoost(2.3f);
-    assertEquals(query, qp.parse(query.toString()));
+    assertEquals(query, getQuery(query.toString(),qp));
+    setDefaultField(oldDefaultField);
+  }
+
+  public void testNestedAndClausesFoo() throws Exception {
+    String query = "(field1:[1 TO *] AND field1:[* TO 2]) AND field2:(z)";
+    BooleanQuery q = new BooleanQuery();
+    BooleanQuery bq = new BooleanQuery();
+    bq.add(TermRangeQuery.newStringRange("field1", "1", null, true, true), BooleanClause.Occur.MUST);
+    bq.add(TermRangeQuery.newStringRange("field1", null, "2", true, true), BooleanClause.Occur.MUST);
+    q.add(bq, BooleanClause.Occur.MUST);
+    q.add(new TermQuery(new Term("field2", "z")), BooleanClause.Occur.MUST);
+    assertEquals(q, getQuery(query, new MockAnalyzer(random())));
   }
-  
 }

Modified: lucene/dev/branches/LUCENE-2878/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/DuplicateFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/LUCENE-2878/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/DuplicateFilter.java?rev=1372423&r1=1372422&r2=1372423&view=diff
==============================================================================
--- lucene/dev/branches/LUCENE-2878/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/DuplicateFilter.java (original)
+++ lucene/dev/branches/LUCENE-2878/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/DuplicateFilter.java Mon Aug 13 13:52:46 2012
@@ -102,7 +102,7 @@ public class DuplicateFilter extends Fil
       if (currTerm == null) {
         break;
       } else {
-        docs = termsEnum.docs(acceptDocs, docs, false);
+        docs = termsEnum.docs(acceptDocs, docs, 0);
         int doc = docs.nextDoc();
         if (doc != DocIdSetIterator.NO_MORE_DOCS) {
           if (keepMode == KeepMode.KM_USE_FIRST_OCCURRENCE) {
@@ -142,7 +142,7 @@ public class DuplicateFilter extends Fil
       } else {
         if (termsEnum.docFreq() > 1) {
           // unset potential duplicates
-          docs = termsEnum.docs(acceptDocs, docs, false);
+          docs = termsEnum.docs(acceptDocs, docs, 0);
           int doc = docs.nextDoc();
           if (doc != DocIdSetIterator.NO_MORE_DOCS) {
             if (keepMode == KeepMode.KM_USE_FIRST_OCCURRENCE) {

Modified: lucene/dev/branches/LUCENE-2878/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/LUCENE-2878/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java?rev=1372423&r1=1372422&r2=1372423&view=diff
==============================================================================
--- lucene/dev/branches/LUCENE-2878/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java (original)
+++ lucene/dev/branches/LUCENE-2878/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java Mon Aug 13 13:52:46 2012
@@ -139,7 +139,7 @@ public class DuplicateFilterTest extends
                                    new BytesRef(url),
                                    MultiFields.getLiveDocs(reader),
                                    null,
-                                   false);
+                                   0);
 
       int lastDoc = 0;
       while (td.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
@@ -163,7 +163,7 @@ public class DuplicateFilterTest extends
                                    new BytesRef(url),
                                    MultiFields.getLiveDocs(reader),
                                    null,
-                                   false);
+                                   0);
 
       int lastDoc = 0;
       td.nextDoc();

Modified: lucene/dev/branches/LUCENE-2878/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowFuzzyQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/LUCENE-2878/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowFuzzyQuery.java?rev=1372423&r1=1372422&r2=1372423&view=diff
==============================================================================
--- lucene/dev/branches/LUCENE-2878/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowFuzzyQuery.java (original)
+++ lucene/dev/branches/LUCENE-2878/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowFuzzyQuery.java Mon Aug 13 13:52:46 2012
@@ -440,21 +440,25 @@ public class TestSlowFuzzyQuery extends 
     assertEquals(1, hits.length);
     assertEquals("foobar", searcher.doc(hits[0].doc).get("field"));
     
-    q = new SlowFuzzyQuery(new Term("field", "t"), 3);
-    hits = searcher.search(q, 10).scoreDocs;
-    assertEquals(1, hits.length);
-    assertEquals("test", searcher.doc(hits[0].doc).get("field"));
+    // TODO: cannot really be supported given the legacy scoring
+    // system which scores negative, if the distance > min term len,
+    // so such matches were always impossible with lucene 3.x, etc
+    //
+    //q = new SlowFuzzyQuery(new Term("field", "t"), 3);
+    //hits = searcher.search(q, 10).scoreDocs;
+    //assertEquals(1, hits.length);
+    //assertEquals("test", searcher.doc(hits[0].doc).get("field"));
     
-    q = new SlowFuzzyQuery(new Term("field", "a"), 4f, 0, 50);
-    hits = searcher.search(q, 10).scoreDocs;
-    assertEquals(1, hits.length);
-    assertEquals("test", searcher.doc(hits[0].doc).get("field"));
+    // q = new SlowFuzzyQuery(new Term("field", "a"), 4f, 0, 50);
+    // hits = searcher.search(q, 10).scoreDocs;
+    // assertEquals(1, hits.length);
+    // assertEquals("test", searcher.doc(hits[0].doc).get("field"));
     
-    q = new SlowFuzzyQuery(new Term("field", "a"), 6f, 0, 50);
-    hits = searcher.search(q, 10).scoreDocs;
-    assertEquals(2, hits.length);
-    assertEquals("test", searcher.doc(hits[0].doc).get("field"));
-    assertEquals("foobar", searcher.doc(hits[1].doc).get("field"));
+    // q = new SlowFuzzyQuery(new Term("field", "a"), 6f, 0, 50);
+    // hits = searcher.search(q, 10).scoreDocs;
+    // assertEquals(2, hits.length);
+    // assertEquals("test", searcher.doc(hits[0].doc).get("field"));
+    // assertEquals("foobar", searcher.doc(hits[1].doc).get("field"));
     
     reader.close();
     index.close();

Modified: lucene/dev/branches/LUCENE-2878/lucene/spatial/src/java/org/apache/lucene/spatial/SpatialStrategy.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/LUCENE-2878/lucene/spatial/src/java/org/apache/lucene/spatial/SpatialStrategy.java?rev=1372423&r1=1372422&r2=1372423&view=diff
==============================================================================
--- lucene/dev/branches/LUCENE-2878/lucene/spatial/src/java/org/apache/lucene/spatial/SpatialStrategy.java (original)
+++ lucene/dev/branches/LUCENE-2878/lucene/spatial/src/java/org/apache/lucene/spatial/SpatialStrategy.java Mon Aug 13 13:52:46 2012
@@ -28,10 +28,26 @@ import org.apache.lucene.search.Query;
 import org.apache.lucene.spatial.query.SpatialArgs;
 
 /**
- * The SpatialStrategy encapsulates an approach to indexing and searching based on shapes.
+ * The SpatialStrategy encapsulates an approach to indexing and searching based
+ * on shapes.
  * <p/>
- * Note that a SpatialStrategy is not involved with the Lucene stored field values of shapes, which is
- * immaterial to indexing & search.
+ * Different implementations will support different features. A strategy should
+ * document these common elements:
+ * <ul>
+ *   <li>Can it index more than one shape per field?</li>
+ *   <li>What types of shapes can be indexed?</li>
+ *   <li>What types of query shapes can be used?</li>
+ *   <li>What types of query operations are supported?
+ *   This might vary per shape.</li>
+ *   <li>Are there caches?  Under what circumstances are they used?
+ *   Roughly how big are they?  Is it segmented by Lucene segments, such as is
+ *   done by the Lucene {@link org.apache.lucene.search.FieldCache} and
+ *   {@link org.apache.lucene.index.DocValues} (ideal) or is it for the entire
+ *   index?</li>
+ * </ul>
+ * <p/>
+ * Note that a SpatialStrategy is not involved with the Lucene stored field
+ * values of shapes, which is immaterial to indexing &amp; search.
  * <p/>
  * Thread-safe.
  *

Modified: lucene/dev/branches/LUCENE-2878/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/PrefixTreeStrategy.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/LUCENE-2878/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/PrefixTreeStrategy.java?rev=1372423&r1=1372422&r2=1372423&view=diff
==============================================================================
--- lucene/dev/branches/LUCENE-2878/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/PrefixTreeStrategy.java (original)
+++ lucene/dev/branches/LUCENE-2878/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/PrefixTreeStrategy.java Mon Aug 13 13:52:46 2012
@@ -24,13 +24,14 @@ import org.apache.lucene.analysis.TokenS
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
+import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.spatial.SpatialStrategy;
 import org.apache.lucene.spatial.prefix.tree.Node;
 import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree;
 import org.apache.lucene.spatial.query.SpatialArgs;
-import org.apache.lucene.spatial.util.CachedDistanceValueSource;
+import org.apache.lucene.spatial.util.ShapeFieldCacheDistanceValueSource;
 
 import java.util.Iterator;
 import java.util.List;
@@ -88,6 +89,7 @@ public abstract class PrefixTreeStrategy
     FIELD_TYPE.setIndexed(true);
     FIELD_TYPE.setTokenized(true);
     FIELD_TYPE.setOmitNorms(true);
+    FIELD_TYPE.setIndexOptions(FieldInfo.IndexOptions.DOCS_ONLY);
     FIELD_TYPE.freeze();
   }
 
@@ -144,7 +146,7 @@ public abstract class PrefixTreeStrategy
       }
     }
     Point point = args.getShape().getCenter();
-    return new CachedDistanceValueSource(point, calc, p);
+    return new ShapeFieldCacheDistanceValueSource(point, calc, p);
   }
 
   public SpatialPrefixTree getGrid() {

Modified: lucene/dev/branches/LUCENE-2878/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/RecursivePrefixTreeFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/LUCENE-2878/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/RecursivePrefixTreeFilter.java?rev=1372423&r1=1372422&r2=1372423&view=diff
==============================================================================
--- lucene/dev/branches/LUCENE-2878/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/RecursivePrefixTreeFilter.java (original)
+++ lucene/dev/branches/LUCENE-2878/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/RecursivePrefixTreeFilter.java Mon Aug 13 13:52:46 2012
@@ -112,7 +112,7 @@ RE "scan" threshold:
       if (seekStat == TermsEnum.SeekStatus.NOT_FOUND)
         continue;
       if (cell.getLevel() == detailLevel || cell.isLeaf()) {
-        docsEnum = termsEnum.docs(acceptDocs, docsEnum, false);
+        docsEnum = termsEnum.docs(acceptDocs, docsEnum, 0);
         addDocs(docsEnum,bits);
       } else {//any other intersection
         //If the next indexed term is the leaf marker, then add all of them
@@ -120,7 +120,7 @@ RE "scan" threshold:
         assert StringHelper.startsWith(nextCellTerm, cellTerm);
         scanCell = grid.getNode(nextCellTerm.bytes, nextCellTerm.offset, nextCellTerm.length, scanCell);
         if (scanCell.isLeaf()) {
-          docsEnum = termsEnum.docs(acceptDocs, docsEnum, false);
+          docsEnum = termsEnum.docs(acceptDocs, docsEnum, 0);
           addDocs(docsEnum,bits);
           termsEnum.next();//move pointer to avoid potential redundant addDocs() below
         }
@@ -145,7 +145,7 @@ RE "scan" threshold:
               if(queryShape.relate(cShape, grid.getSpatialContext()) == SpatialRelation.DISJOINT)
                 continue;
 
-              docsEnum = termsEnum.docs(acceptDocs, docsEnum, false);
+              docsEnum = termsEnum.docs(acceptDocs, docsEnum, 0);
               addDocs(docsEnum,bits);
             }
           }//term loop

Modified: lucene/dev/branches/LUCENE-2878/lucene/spatial/src/java/org/apache/lucene/spatial/util/ShapeFieldCacheProvider.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/LUCENE-2878/lucene/spatial/src/java/org/apache/lucene/spatial/util/ShapeFieldCacheProvider.java?rev=1372423&r1=1372422&r2=1372423&view=diff
==============================================================================
--- lucene/dev/branches/LUCENE-2878/lucene/spatial/src/java/org/apache/lucene/spatial/util/ShapeFieldCacheProvider.java (original)
+++ lucene/dev/branches/LUCENE-2878/lucene/spatial/src/java/org/apache/lucene/spatial/util/ShapeFieldCacheProvider.java Mon Aug 13 13:52:46 2012
@@ -64,7 +64,7 @@ public abstract class ShapeFieldCachePro
       while (term != null) {
         T shape = readShape(term);
         if( shape != null ) {
-          docs = te.docs(null, docs, false);
+          docs = te.docs(null, docs, 0);
           Integer docid = docs.nextDoc();
           while (docid != DocIdSetIterator.NO_MORE_DOCS) {
             idx.add( docid, shape );

Modified: lucene/dev/branches/LUCENE-2878/lucene/spatial/src/java/overview.html
URL: http://svn.apache.org/viewvc/lucene/dev/branches/LUCENE-2878/lucene/spatial/src/java/overview.html?rev=1372423&r1=1372422&r2=1372423&view=diff
==============================================================================
--- lucene/dev/branches/LUCENE-2878/lucene/spatial/src/java/overview.html (original)
+++ lucene/dev/branches/LUCENE-2878/lucene/spatial/src/java/overview.html Mon Aug 13 13:52:46 2012
@@ -16,8 +16,49 @@
 -->
 <html>
   <head>
-    <title>Apache Lucene Spatial Strategies</title>
+    <title>Apache Lucene Spatial Module</title>
   </head>
   <body>
+
+  <h1>The Spatial Module for Apache Lucene</h1>
+
+  <p>
+    The spatial module is new in Lucene 4, replacing the old contrib module
+    that came before it. The principal interface to the module is
+    a {@link org.apache.lucene.spatial.SpatialStrategy}
+    which encapsulates an approach to indexing and searching
+    based on shapes.  Different Strategies have different features and
+    performance profiles, which are documented at each Strategy class level.
+  </p>
+  <p>
+    For some sample code showing how to use the API, see SpatialExample.java in
+    the tests.
+  </p>
+  <p>
+    The spatial module uses
+    <a href="https://github.com/spatial4j/spatial4j">Spatial4j</a>
+    heavily.  Spatial4j is an ASL licensed library with these capabilities:
+    <ul>
+    <li>Provides shape implementations, namely point, rectangle,
+      and circle.  Both geospatial contexts and plain 2D Euclidean/Cartesian contexts
+      are supported.
+      With an additional dependency, it adds polygon and other geometry shape
+      support via integration with
+      <a href="http://sourceforge.net/projects/jts-topo-suite/">JTS Topology Suite</a>.
+      This includes dateline wrap support.</li>
+    <li>Shape parsing and serialization, including
+      <a href="http://en.wikipedia.org/wiki/Well-known_text">Well-Known Text (WKT)</a>
+      (via JTS).</li>
+    <li>Distance and other spatial related math calculations.</li>
+    </ul>
+  </p>
+  <p>
+    Historical note: The new spatial module was once known as
+    Lucene Spatial Playground (LSP) as an external project.  In ~March 2012, LSP
+    split into this new module as part of Lucene and Spatial4j externally. A
+    large chunk of the LSP implementation originated as SOLR-2155 which uses
+    trie/prefix-tree algorithms with a geohash encoding.
+  </p>
+
   </body>
 </html>
\ No newline at end of file

Modified: lucene/dev/branches/LUCENE-2878/lucene/suggest/src/test/org/apache/lucene/search/spell/TestSpellChecker.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/LUCENE-2878/lucene/suggest/src/test/org/apache/lucene/search/spell/TestSpellChecker.java?rev=1372423&r1=1372422&r2=1372423&view=diff
==============================================================================
--- lucene/dev/branches/LUCENE-2878/lucene/suggest/src/test/org/apache/lucene/search/spell/TestSpellChecker.java (original)
+++ lucene/dev/branches/LUCENE-2878/lucene/suggest/src/test/org/apache/lucene/search/spell/TestSpellChecker.java Mon Aug 13 13:52:46 2012
@@ -39,6 +39,7 @@ import org.apache.lucene.store.AlreadyCl
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.English;
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.NamedThreadFactory;
 
 /**
  * Spell checker test case
@@ -413,7 +414,7 @@ public class TestSpellChecker extends Lu
     int num_field2 = this.numdoc();
     assertEquals(num_field2, num_field1 + 1);
     int numThreads = 5 + random().nextInt(5);
-    ExecutorService executor = Executors.newFixedThreadPool(numThreads);
+    ExecutorService executor = Executors.newFixedThreadPool(numThreads, new NamedThreadFactory("testConcurrentAccess"));
     SpellCheckWorker[] workers = new SpellCheckWorker[numThreads];
     for (int i = 0; i < numThreads; i++) {
       SpellCheckWorker spellCheckWorker = new SpellCheckWorker(r);

Modified: lucene/dev/branches/LUCENE-2878/lucene/test-framework/ivy.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/LUCENE-2878/lucene/test-framework/ivy.xml?rev=1372423&r1=1372422&r2=1372423&view=diff
==============================================================================
--- lucene/dev/branches/LUCENE-2878/lucene/test-framework/ivy.xml (original)
+++ lucene/dev/branches/LUCENE-2878/lucene/test-framework/ivy.xml Mon Aug 13 13:52:46 2012
@@ -33,8 +33,8 @@
       <dependency org="org.apache.ant" name="ant-junit" rev="1.8.2" transitive="false" />
 
       <dependency org="junit" name="junit" rev="4.10" transitive="false" conf="default->*;junit4-stdalone->*" />
-            <dependency org="com.carrotsearch.randomizedtesting" name="junit4-ant" rev="1.6.0" transitive="false" conf="default->*;junit4-stdalone->*" />
-      <dependency org="com.carrotsearch.randomizedtesting" name="randomizedtesting-runner" rev="1.6.0" transitive="false" conf="default->*;junit4-stdalone->*" />
+      <dependency org="com.carrotsearch.randomizedtesting" name="junit4-ant" rev="2.0.0.rc5" transitive="false" conf="default->*;junit4-stdalone->*" />
+      <dependency org="com.carrotsearch.randomizedtesting" name="randomizedtesting-runner" rev="2.0.0.rc5" transitive="false" conf="default->*;junit4-stdalone->*" />
 
       <exclude org="*" ext="*" matcher="regexp" type="${ivy.exclude.types}"/> 
     </dependencies>

Modified: lucene/dev/branches/LUCENE-2878/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/LUCENE-2878/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java?rev=1372423&r1=1372422&r2=1372423&view=diff
==============================================================================
--- lucene/dev/branches/LUCENE-2878/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java (original)
+++ lucene/dev/branches/LUCENE-2878/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java Mon Aug 13 13:52:46 2012
@@ -473,14 +473,6 @@ public abstract class BaseTokenStreamTes
     }
   }
 
-  static final Set<String> doesntSupportOffsets = new HashSet<String>(Arrays.asList( 
-    "Lucene3x",
-    "MockFixedIntBlock",
-    "MockVariableIntBlock",
-    "MockSep",
-    "MockRandom"
-  ));
-
   private static void checkRandomData(Random random, Analyzer a, int iterations, int maxWordLength, boolean useCharFilter, boolean simple, boolean offsetsAreCorrect, RandomIndexWriter iw) throws IOException {
 
     final LineFileDocs docs = new LineFileDocs(random);
@@ -494,6 +486,9 @@ public abstract class BaseTokenStreamTes
         ft.setStoreTermVectors(true);
         ft.setStoreTermVectorOffsets(random.nextBoolean());
         ft.setStoreTermVectorPositions(random.nextBoolean());
+        if (ft.storeTermVectorPositions()) {
+          ft.setStoreTermVectorPayloads(random.nextBoolean());
+        }
       }
       if (random.nextBoolean()) {
         ft.setOmitNorms(true);

Modified: lucene/dev/branches/LUCENE-2878/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/LUCENE-2878/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java?rev=1372423&r1=1372422&r2=1372423&view=diff
==============================================================================
--- lucene/dev/branches/LUCENE-2878/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java (original)
+++ lucene/dev/branches/LUCENE-2878/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java Mon Aug 13 13:52:46 2012
@@ -18,16 +18,24 @@ package org.apache.lucene.codecs.asserti
  */
 
 import java.io.IOException;
+import java.util.Comparator;
+import java.util.Iterator;
 
 import org.apache.lucene.codecs.FieldsConsumer;
 import org.apache.lucene.codecs.FieldsProducer;
+import org.apache.lucene.codecs.PostingsConsumer;
 import org.apache.lucene.codecs.PostingsFormat;
+import org.apache.lucene.codecs.TermStats;
+import org.apache.lucene.codecs.TermsConsumer;
 import org.apache.lucene.codecs.lucene40.Lucene40PostingsFormat;
 import org.apache.lucene.index.AssertingAtomicReader;
-import org.apache.lucene.index.FieldsEnum;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.SegmentReadState;
 import org.apache.lucene.index.SegmentWriteState;
 import org.apache.lucene.index.Terms;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.OpenBitSet;
 
 /**
  * Just like {@link Lucene40PostingsFormat} but with additional asserts.
@@ -39,10 +47,9 @@ public class AssertingPostingsFormat ext
     super("Asserting");
   }
   
-  // TODO: we could add some useful checks here?
   @Override
   public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
-    return in.fieldsConsumer(state);
+    return new AssertingFieldsConsumer(in.fieldsConsumer(state));
   }
 
   @Override
@@ -63,10 +70,10 @@ public class AssertingPostingsFormat ext
     }
 
     @Override
-    public FieldsEnum iterator() throws IOException {
-      FieldsEnum iterator = in.iterator();
+    public Iterator<String> iterator() {
+      Iterator<String> iterator = in.iterator();
       assert iterator != null;
-      return new AssertingAtomicReader.AssertingFieldsEnum(iterator);
+      return iterator;
     }
 
     @Override
@@ -85,4 +92,164 @@ public class AssertingPostingsFormat ext
       return in.getUniqueTermCount();
     }
   }
+  
+  static class AssertingFieldsConsumer extends FieldsConsumer {
+    private final FieldsConsumer in;
+    
+    AssertingFieldsConsumer(FieldsConsumer in) {
+      this.in = in;
+    }
+    
+    @Override
+    public TermsConsumer addField(FieldInfo field) throws IOException {
+      TermsConsumer consumer = in.addField(field);
+      assert consumer != null;
+      return new AssertingTermsConsumer(consumer, field);
+    }
+
+    @Override
+    public void close() throws IOException {
+      in.close();
+    }
+  }
+  
+  static enum TermsConsumerState { INITIAL, START, FINISHED };
+  static class AssertingTermsConsumer extends TermsConsumer {
+    private final TermsConsumer in;
+    private final FieldInfo fieldInfo;
+    private BytesRef lastTerm = null;
+    private TermsConsumerState state = TermsConsumerState.INITIAL;
+    private AssertingPostingsConsumer lastPostingsConsumer = null;
+    private long sumTotalTermFreq = 0;
+    private long sumDocFreq = 0;
+    private OpenBitSet visitedDocs = new OpenBitSet();
+    
+    AssertingTermsConsumer(TermsConsumer in, FieldInfo fieldInfo) {
+      this.in = in;
+      this.fieldInfo = fieldInfo;
+    }
+    
+    @Override
+    public PostingsConsumer startTerm(BytesRef text) throws IOException {
+      assert state == TermsConsumerState.INITIAL || state == TermsConsumerState.START && lastPostingsConsumer.docFreq == 0;
+      state = TermsConsumerState.START;
+      assert lastTerm == null || in.getComparator().compare(text, lastTerm) > 0;
+      lastTerm = BytesRef.deepCopyOf(text);
+      return lastPostingsConsumer = new AssertingPostingsConsumer(in.startTerm(text), fieldInfo, visitedDocs);
+    }
+
+    @Override
+    public void finishTerm(BytesRef text, TermStats stats) throws IOException {
+      assert state == TermsConsumerState.START;
+      state = TermsConsumerState.INITIAL;
+      assert text.equals(lastTerm);
+      assert stats.docFreq > 0; // otherwise, this method should not be called.
+      assert stats.docFreq == lastPostingsConsumer.docFreq;
+      sumDocFreq += stats.docFreq;
+      if (fieldInfo.getIndexOptions() == IndexOptions.DOCS_ONLY) {
+        assert stats.totalTermFreq == -1;
+      } else {
+        assert stats.totalTermFreq == lastPostingsConsumer.totalTermFreq;
+        sumTotalTermFreq += stats.totalTermFreq;
+      }
+      in.finishTerm(text, stats);
+    }
+
+    @Override
+    public void finish(long sumTotalTermFreq, long sumDocFreq, int docCount) throws IOException {
+      assert state == TermsConsumerState.INITIAL || state == TermsConsumerState.START && lastPostingsConsumer.docFreq == 0;
+      state = TermsConsumerState.FINISHED;
+      assert docCount >= 0;
+      assert docCount == visitedDocs.cardinality();
+      assert sumDocFreq >= docCount;
+      assert sumDocFreq == this.sumDocFreq;
+      if (fieldInfo.getIndexOptions() == IndexOptions.DOCS_ONLY) {
+        assert sumTotalTermFreq == -1;
+      } else {
+        assert sumTotalTermFreq >= sumDocFreq;
+        assert sumTotalTermFreq == this.sumTotalTermFreq;
+      }
+      in.finish(sumTotalTermFreq, sumDocFreq, docCount);
+    }
+
+    @Override
+    public Comparator<BytesRef> getComparator() throws IOException {
+      return in.getComparator();
+    }
+  }
+  
+  static enum PostingsConsumerState { INITIAL, START };
+  static class AssertingPostingsConsumer extends PostingsConsumer {
+    private final PostingsConsumer in;
+    private final FieldInfo fieldInfo;
+    private final OpenBitSet visitedDocs;
+    private PostingsConsumerState state = PostingsConsumerState.INITIAL;
+    private int freq;
+    private int positionCount;
+    private int lastPosition = 0;
+    private int lastStartOffset = 0;
+    int docFreq = 0;
+    long totalTermFreq = 0;
+    
+    AssertingPostingsConsumer(PostingsConsumer in, FieldInfo fieldInfo, OpenBitSet visitedDocs) {
+      this.in = in;
+      this.fieldInfo = fieldInfo;
+      this.visitedDocs = visitedDocs;
+    }
+
+    @Override
+    public void startDoc(int docID, int freq) throws IOException {
+      assert state == PostingsConsumerState.INITIAL;
+      state = PostingsConsumerState.START;
+      assert docID >= 0;
+      if (fieldInfo.getIndexOptions() == IndexOptions.DOCS_ONLY) {
+        assert freq == -1;
+        this.freq = 0; // we don't expect any positions here
+      } else {
+        assert freq > 0;
+        this.freq = freq;
+        totalTermFreq += freq;
+      }
+      this.positionCount = 0;
+      this.lastPosition = 0;
+      this.lastStartOffset = 0;
+      docFreq++;
+      visitedDocs.set(docID);
+      in.startDoc(docID, freq);
+    }
+
+    @Override
+    public void addPosition(int position, BytesRef payload, int startOffset, int endOffset) throws IOException {
+      assert state == PostingsConsumerState.START;
+      assert positionCount < freq;
+      positionCount++;
+      assert position >= lastPosition || position == -1; /* we still allow -1 from old 3.x indexes */
+      lastPosition = position;
+      if (fieldInfo.getIndexOptions() == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) {
+        assert startOffset >= 0;
+        assert startOffset >= lastStartOffset;
+        lastStartOffset = startOffset;
+        assert endOffset >= startOffset;
+      } else {
+        assert startOffset == -1;
+        assert endOffset == -1;
+      }
+      if (payload != null) {
+        assert fieldInfo.hasPayloads();
+      }
+      in.addPosition(position, payload, startOffset, endOffset);
+    }
+
+    @Override
+    public void finishDoc() throws IOException {
+      assert state == PostingsConsumerState.START;
+      state = PostingsConsumerState.INITIAL;
+      if (fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
+        assert positionCount == 0; // we should not have fed any positions!
+      } else {
+        assert positionCount == freq;
+      }
+      in.finishDoc();
+    }
+  }
 }

Modified: lucene/dev/branches/LUCENE-2878/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/LUCENE-2878/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java?rev=1372423&r1=1372422&r2=1372423&view=diff
==============================================================================
--- lucene/dev/branches/LUCENE-2878/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java (original)
+++ lucene/dev/branches/LUCENE-2878/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java Mon Aug 13 13:52:46 2012
@@ -39,7 +39,6 @@ import org.apache.lucene.index.DocsAndPo
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.FieldsEnum;
 import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.SegmentReadState;
 import org.apache.lucene.index.SegmentWriteState;
@@ -50,6 +49,7 @@ import org.apache.lucene.store.IndexOutp
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.UnmodifiableIterator;
 
 /** Stores all postings data in RAM, but writes a small
  *  token (header + single int) to identify which "slot" the
@@ -112,8 +112,8 @@ public class RAMOnlyPostingsFormat exten
     }
 
     @Override
-    public FieldsEnum iterator() {
-      return new RAMFieldsEnum(this);
+    public Iterator<String> iterator() {
+      return new UnmodifiableIterator<String>(fieldToTerms.keySet().iterator());
     }
 
     @Override
@@ -127,9 +127,11 @@ public class RAMOnlyPostingsFormat exten
     long sumTotalTermFreq;
     long sumDocFreq;
     int docCount;
+    final FieldInfo info;
 
-    RAMField(String field) {
+    RAMField(String field, FieldInfo info) {
       this.field = field;
+      this.info = info;
     }
 
     @Override
@@ -161,6 +163,21 @@ public class RAMOnlyPostingsFormat exten
     public Comparator<BytesRef> getComparator() {
       return reverseUnicodeComparator;
     }
+
+    @Override
+    public boolean hasOffsets() {
+      return info.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
+    }
+
+    @Override
+    public boolean hasPositions() {
+      return info.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
+    }
+    
+    @Override
+    public boolean hasPayloads() {
+      return info.hasPayloads();
+    }
   }
 
   static class RAMTerm {
@@ -198,7 +215,7 @@ public class RAMOnlyPostingsFormat exten
       if (field.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0) {
         throw new UnsupportedOperationException("this codec cannot index offsets");
       }
-      RAMField ramField = new RAMField(field.name);
+      RAMField ramField = new RAMField(field.name, field);
       postings.fieldToTerms.put(field.name, ramField);
       termsConsumer.reset(ramField);
       return termsConsumer;
@@ -286,33 +303,6 @@ public class RAMOnlyPostingsFormat exten
     }
   }
 
-  // Classes for reading from the postings state
-  static class RAMFieldsEnum extends FieldsEnum {
-    private final RAMPostings postings;
-    private final Iterator<String> it;
-    private String current;
-
-    public RAMFieldsEnum(RAMPostings postings) {
-      this.postings = postings;
-      this.it = postings.fieldToTerms.keySet().iterator();
-    }
-
-    @Override
-    public String next() {
-      if (it.hasNext()) {
-        current = it.next();
-      } else {
-        current = null;
-      }
-      return current;
-    }
-
-    @Override
-    public Terms terms() {
-      return postings.fieldToTerms.get(current);
-    }
-  }
-
   static class RAMTermsEnum extends TermsEnum {
     Iterator<String> it;
     String current;
@@ -386,15 +376,12 @@ public class RAMOnlyPostingsFormat exten
     }
 
     @Override
-    public DocsEnum docs(Bits liveDocs, DocsEnum reuse, boolean needsFreqs) {
+    public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) {
       return new RAMDocsEnum(ramField.termToDocs.get(current), liveDocs);
     }
 
     @Override
-    public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, boolean needsOffsets) {
-      if (needsOffsets) {
-        return null;
-      }
+    public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) {
       return new RAMDocsAndPositionsEnum(ramField.termToDocs.get(current), liveDocs);
     }
   }
@@ -510,13 +497,12 @@ public class RAMOnlyPostingsFormat exten
     }
 
     @Override
-    public boolean hasPayload() {
-      return current.payloads != null && current.payloads[posUpto-1] != null;
-    }
-
-    @Override
     public BytesRef getPayload() {
-      return new BytesRef(current.payloads[posUpto-1]);
+      if (current.payloads != null && current.payloads[posUpto-1] != null) {
+        return new BytesRef(current.payloads[posUpto-1]);
+      } else {
+        return null;
+      }
     }
   }
 

Modified: lucene/dev/branches/LUCENE-2878/lucene/test-framework/src/java/org/apache/lucene/index/AssertingAtomicReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/LUCENE-2878/lucene/test-framework/src/java/org/apache/lucene/index/AssertingAtomicReader.java?rev=1372423&r1=1372422&r2=1372423&view=diff
==============================================================================
--- lucene/dev/branches/LUCENE-2878/lucene/test-framework/src/java/org/apache/lucene/index/AssertingAtomicReader.java (original)
+++ lucene/dev/branches/LUCENE-2878/lucene/test-framework/src/java/org/apache/lucene/index/AssertingAtomicReader.java Mon Aug 13 13:52:46 2012
@@ -1,6 +1,7 @@
 package org.apache.lucene.index;
 
 import java.io.IOException;
+import java.util.Iterator;
 
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.util.Bits;
@@ -60,10 +61,10 @@ public class AssertingAtomicReader exten
     }
 
     @Override
-    public FieldsEnum iterator() throws IOException {
-      FieldsEnum fieldsEnum = super.iterator();
-      assert fieldsEnum != null;
-      return new AssertingFieldsEnum(fieldsEnum);
+    public Iterator<String> iterator() {
+      Iterator<String> iterator = super.iterator();
+      assert iterator != null;
+      return iterator;
     }
 
     @Override
@@ -74,21 +75,6 @@ public class AssertingAtomicReader exten
   }
   
   /**
-   * Wraps a FieldsEnum but with additional asserts
-   */
-  public static class AssertingFieldsEnum extends FilterFieldsEnum {
-    public AssertingFieldsEnum(FieldsEnum in) {
-      super(in);
-    }
-
-    @Override
-    public Terms terms() throws IOException {
-      Terms terms = super.terms();
-      return terms == null ? null : new AssertingTerms(terms);
-    }
-  }
-  
-  /**
    * Wraps a Terms but with additional asserts
    */
   public static class AssertingTerms extends FilterTerms {
@@ -125,7 +111,7 @@ public class AssertingAtomicReader exten
     }
 
     @Override
-    public DocsEnum docs(Bits liveDocs, DocsEnum reuse, boolean needsFreqs) throws IOException {
+    public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
       assert state == State.POSITIONED: "docs(...) called on unpositioned TermsEnum";
 
       // TODO: should we give this thing a random to be super-evil,
@@ -133,12 +119,12 @@ public class AssertingAtomicReader exten
       if (reuse instanceof AssertingDocsEnum) {
         reuse = ((AssertingDocsEnum) reuse).in;
       }
-      DocsEnum docs = super.docs(liveDocs, reuse, needsFreqs);
+      DocsEnum docs = super.docs(liveDocs, reuse, flags);
       return docs == null ? null : new AssertingDocsEnum(docs);
     }
 
     @Override
-    public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, boolean needsOffsets) throws IOException {
+    public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
       assert state == State.POSITIONED: "docsAndPositions(...) called on unpositioned TermsEnum";
 
       // TODO: should we give this thing a random to be super-evil,
@@ -146,7 +132,7 @@ public class AssertingAtomicReader exten
       if (reuse instanceof AssertingDocsAndPositionsEnum) {
         reuse = ((AssertingDocsAndPositionsEnum) reuse).in;
       }
-      DocsAndPositionsEnum docs = super.docsAndPositions(liveDocs, reuse, needsOffsets);
+      DocsAndPositionsEnum docs = super.docsAndPositions(liveDocs, reuse, flags);
       return docs == null ? null : new AssertingDocsAndPositionsEnum(docs);
     }
 
@@ -365,15 +351,22 @@ public class AssertingAtomicReader exten
       assert state != DocsEnumState.START : "getPayload() called before nextDoc()/advance()";
       assert state != DocsEnumState.FINISHED : "getPayload() called after NO_MORE_DOCS";
       assert positionCount > 0 : "getPayload() called before nextPosition()!";
-      return super.getPayload();
+      BytesRef payload = super.getPayload();
+      assert payload == null || payload.length > 0 : "getPayload() returned payload with invalid length!";
+      return payload;
     }
+  }
 
-    @Override
-    public boolean hasPayload() {
-      assert state != DocsEnumState.START : "hasPayload() called before nextDoc()/advance()";
-      assert state != DocsEnumState.FINISHED : "hasPayload() called after NO_MORE_DOCS";
-      assert positionCount > 0 : "hasPayload() called before nextPosition()!";
-      return super.hasPayload();
-    }
+  // this is the same hack as FCInvisible
+  @Override
+  public Object getCoreCacheKey() {
+    return cacheKey;
   }
+
+  @Override
+  public Object getCombinedCoreAndDeletesKey() {
+    return cacheKey;
+  }
+  
+  private final Object cacheKey = new Object();
 }
\ No newline at end of file

Modified: lucene/dev/branches/LUCENE-2878/lucene/test-framework/src/java/org/apache/lucene/index/FieldFilterAtomicReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/LUCENE-2878/lucene/test-framework/src/java/org/apache/lucene/index/FieldFilterAtomicReader.java?rev=1372423&r1=1372422&r2=1372423&view=diff
==============================================================================
--- lucene/dev/branches/LUCENE-2878/lucene/test-framework/src/java/org/apache/lucene/index/FieldFilterAtomicReader.java (original)
+++ lucene/dev/branches/LUCENE-2878/lucene/test-framework/src/java/org/apache/lucene/index/FieldFilterAtomicReader.java Mon Aug 13 13:52:46 2012
@@ -19,6 +19,8 @@ package org.apache.lucene.index;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
 import java.util.Set;
 
 public final class FieldFilterAtomicReader extends FilterAtomicReader {
@@ -125,15 +127,16 @@ public final class FieldFilterAtomicRead
   }
   
   private class FieldFilterFields extends FilterFields {
+
     public FieldFilterFields(Fields in) {
       super(in);
     }
 
     @Override
-    public int size() throws IOException {
+    public int size() {
       // TODO: add faster implementation!
       int c = 0;
-      final FieldsEnum it = iterator();
+      final Iterator<String> it = iterator();
       while (it.next() != null) {
         c++;
       }
@@ -141,16 +144,46 @@ public final class FieldFilterAtomicRead
     }
 
     @Override
-    public FieldsEnum iterator() throws IOException {
-      return new FilterFieldsEnum(super.iterator()) {
+    public Iterator<String> iterator() {
+      final Iterator<String> in = super.iterator();
+      return new Iterator<String>() {
+        String cached = null;
+        
+        @Override
+        public String next() {
+          if (cached != null) {
+            String next = cached;
+            cached = null;
+            return next;
+          } else {
+            String next = doNext();
+            if (next == null) {
+              throw new NoSuchElementException();
+            } else {
+              return next;
+            }
+          }
+        }
+
         @Override
-        public String next() throws IOException {
-          String f;
-          while ((f = super.next()) != null) {
-            if (hasField(f)) return f;
+        public boolean hasNext() {
+          return cached != null || (cached = doNext()) != null;
+        }
+        
+        private String doNext() {
+          while (in.hasNext()) {
+            String field = in.next();
+            if (hasField(field)) {
+              return field;
+            }
           }
           return null;
-        } 
+        }
+
+        @Override
+        public void remove() {
+          throw new UnsupportedOperationException();
+        }
       };
     }
 

Modified: lucene/dev/branches/LUCENE-2878/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/LUCENE-2878/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java?rev=1372423&r1=1372422&r2=1372423&view=diff
==============================================================================
--- lucene/dev/branches/LUCENE-2878/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java (original)
+++ lucene/dev/branches/LUCENE-2878/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java Mon Aug 13 13:52:46 2012
@@ -29,9 +29,11 @@ import java.util.Set;
 
 import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.codecs.asserting.AssertingPostingsFormat;
+import org.apache.lucene.codecs.bloom.TestBloomFilteredLucene40Postings;
 import org.apache.lucene.codecs.lucene40.Lucene40Codec;
 import org.apache.lucene.codecs.lucene40.Lucene40PostingsFormat;
 import org.apache.lucene.codecs.lucene40ords.Lucene40WithOrds;
+import org.apache.lucene.codecs.memory.DirectPostingsFormat;
 import org.apache.lucene.codecs.memory.MemoryPostingsFormat;
 import org.apache.lucene.codecs.mockintblock.MockFixedIntBlockPostingsFormat;
 import org.apache.lucene.codecs.mockintblock.MockVariableIntBlockPostingsFormat;
@@ -40,6 +42,7 @@ import org.apache.lucene.codecs.mocksep.
 import org.apache.lucene.codecs.nestedpulsing.NestedPulsingPostingsFormat;
 import org.apache.lucene.codecs.pulsing.Pulsing40PostingsFormat;
 import org.apache.lucene.codecs.simpletext.SimpleTextPostingsFormat;
+import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util._TestUtil;
 
 /**
@@ -87,12 +90,19 @@ public class RandomCodec extends Lucene4
     // block via CL:
     int minItemsPerBlock = _TestUtil.nextInt(random, 2, 100);
     int maxItemsPerBlock = 2*(Math.max(2, minItemsPerBlock-1)) + random.nextInt(100);
+    int lowFreqCutoff = _TestUtil.nextInt(random, 2, 100);
 
     add(avoidCodecs,
         new Lucene40PostingsFormat(minItemsPerBlock, maxItemsPerBlock),
+        new DirectPostingsFormat(LuceneTestCase.rarely(random) ? 1 : (LuceneTestCase.rarely(random) ? Integer.MAX_VALUE : maxItemsPerBlock),
+                                 LuceneTestCase.rarely(random) ? 1 : (LuceneTestCase.rarely(random) ? Integer.MAX_VALUE : lowFreqCutoff)),
         new Pulsing40PostingsFormat(1 + random.nextInt(20), minItemsPerBlock, maxItemsPerBlock),
         // add pulsing again with (usually) different parameters
         new Pulsing40PostingsFormat(1 + random.nextInt(20), minItemsPerBlock, maxItemsPerBlock),
+        //TODO as a PostingsFormat which wraps others, we should allow TestBloomFilteredLucene40Postings to be constructed 
+        //with a choice of concrete PostingsFormats. Maybe useful to have a generic means of marking and dealing 
+        //with such "wrapper" classes?
+        new TestBloomFilteredLucene40Postings(),                
         new MockSepPostingsFormat(),
         new MockFixedIntBlockPostingsFormat(_TestUtil.nextInt(random, 1, 2000)),
         new MockVariableIntBlockPostingsFormat( _TestUtil.nextInt(random, 1, 127)),