Posted to commits@lucene.apache.org by mi...@apache.org on 2014/01/17 18:23:44 UTC

svn commit: r1559196 [14/19] - in /lucene/dev/branches/lucene5376: ./ dev-tools/ dev-tools/idea/solr/contrib/morphlines-cell/ dev-tools/maven/lucene/facet/ lucene/ lucene/analysis/ lucene/analysis/common/ lucene/analysis/common/src/java/org/apache/luce...

Modified: lucene/dev/branches/lucene5376/lucene/facet/src/java/org/apache/lucene/facet/range/LongRangeFacetCounts.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/facet/src/java/org/apache/lucene/facet/range/LongRangeFacetCounts.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/facet/src/java/org/apache/lucene/facet/range/LongRangeFacetCounts.java (original)
+++ lucene/dev/branches/lucene5376/lucene/facet/src/java/org/apache/lucene/facet/range/LongRangeFacetCounts.java Fri Jan 17 17:23:33 2014
@@ -57,14 +57,6 @@ public class LongRangeFacetCounts extend
 
     LongRange[] ranges = (LongRange[]) this.ranges;
 
-    // Compute min & max over all ranges:
-    long minIncl = Long.MAX_VALUE;
-    long maxIncl = Long.MIN_VALUE;
-    for(LongRange range : ranges) {
-      minIncl = Math.min(minIncl, range.minIncl);
-      maxIncl = Math.max(maxIncl, range.maxIncl);
-    }
-
     LongRangeCounter counter = new LongRangeCounter(ranges);
 
     int missingCount = 0;

Modified: lucene/dev/branches/lucene5376/lucene/facet/src/java/org/apache/lucene/facet/sortedset/SortedSetDocValuesFacetField.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/facet/src/java/org/apache/lucene/facet/sortedset/SortedSetDocValuesFacetField.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/facet/src/java/org/apache/lucene/facet/sortedset/SortedSetDocValuesFacetField.java (original)
+++ lucene/dev/branches/lucene5376/lucene/facet/src/java/org/apache/lucene/facet/sortedset/SortedSetDocValuesFacetField.java Fri Jan 17 17:23:33 2014
@@ -19,6 +19,7 @@ package org.apache.lucene.facet.sortedse
 
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
+import org.apache.lucene.facet.FacetField;
 
 /** Add an instance of this to your Document for every facet
  *  label to be indexed via SortedSetDocValues. */
@@ -40,6 +41,8 @@ public class SortedSetDocValuesFacetFiel
   /** Sole constructor. */
   public SortedSetDocValuesFacetField(String dim, String label) {
     super("dummy", TYPE);
+    FacetField.verifyLabel(label);
+    FacetField.verifyLabel(dim);
     this.dim = dim;
     this.label = label;
   }

Modified: lucene/dev/branches/lucene5376/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/AssociationFacetField.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/AssociationFacetField.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/AssociationFacetField.java (original)
+++ lucene/dev/branches/lucene5376/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/AssociationFacetField.java Fri Jan 17 17:23:33 2014
@@ -22,6 +22,7 @@ import java.util.Arrays;
 import org.apache.lucene.document.Document; // javadocs
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
+import org.apache.lucene.facet.FacetField;
 import org.apache.lucene.facet.Facets;
 import org.apache.lucene.util.BytesRef;
 
@@ -56,6 +57,10 @@ public class AssociationFacetField exten
    *  association */
   public AssociationFacetField(BytesRef assoc, String dim, String... path) {
     super("dummy", TYPE);
+    FacetField.verifyLabel(dim);
+    for(String label : path) {
+      FacetField.verifyLabel(label);
+    }
     this.dim = dim;
     this.assoc = assoc;
     if (path.length == 0) {

Modified: lucene/dev/branches/lucene5376/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetCounts.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetCounts.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetCounts.java (original)
+++ lucene/dev/branches/lucene5376/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetCounts.java Fri Jan 17 17:23:33 2014
@@ -23,7 +23,6 @@ import java.util.List;
 import org.apache.lucene.facet.FacetsCollector;
 import org.apache.lucene.facet.FacetsCollector.MatchingDocs;
 import org.apache.lucene.facet.FacetsConfig;
-import org.apache.lucene.facet.OrdinalsReader;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.util.FixedBitSet;
 import org.apache.lucene.util.IntsRef;

Modified: lucene/dev/branches/lucene5376/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetSumValueSource.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetSumValueSource.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetSumValueSource.java (original)
+++ lucene/dev/branches/lucene5376/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetSumValueSource.java Fri Jan 17 17:23:33 2014
@@ -22,11 +22,9 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.lucene.facet.DocValuesOrdinalsReader;
 import org.apache.lucene.facet.FacetsCollector;
 import org.apache.lucene.facet.FacetsCollector.MatchingDocs;
 import org.apache.lucene.facet.FacetsConfig;
-import org.apache.lucene.facet.OrdinalsReader;
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;

Modified: lucene/dev/branches/lucene5376/lucene/facet/src/test/org/apache/lucene/facet/FacetTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/facet/src/test/org/apache/lucene/facet/FacetTestCase.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/facet/src/test/org/apache/lucene/facet/FacetTestCase.java (original)
+++ lucene/dev/branches/lucene5376/lucene/facet/src/test/org/apache/lucene/facet/FacetTestCase.java Fri Jan 17 17:23:33 2014
@@ -26,7 +26,10 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.lucene.facet.taxonomy.CachedOrdinalsReader;
+import org.apache.lucene.facet.taxonomy.DocValuesOrdinalsReader;
 import org.apache.lucene.facet.taxonomy.FastTaxonomyFacetCounts;
+import org.apache.lucene.facet.taxonomy.OrdinalsReader;
 import org.apache.lucene.facet.taxonomy.TaxonomyFacetCounts;
 import org.apache.lucene.facet.taxonomy.TaxonomyReader;
 import org.apache.lucene.util.BytesRef;

Modified: lucene/dev/branches/lucene5376/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java (original)
+++ lucene/dev/branches/lucene5376/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java Fri Jan 17 17:23:33 2014
@@ -40,10 +40,6 @@ import org.apache.lucene.facet.FacetsCon
 import org.apache.lucene.facet.LabelAndValue;
 import org.apache.lucene.facet.MultiFacets;
 import org.apache.lucene.facet.DrillSideways.DrillSidewaysResult;
-import org.apache.lucene.facet.range.DoubleRange;
-import org.apache.lucene.facet.range.DoubleRangeFacetCounts;
-import org.apache.lucene.facet.range.LongRange;
-import org.apache.lucene.facet.range.LongRangeFacetCounts;
 import org.apache.lucene.facet.taxonomy.TaxonomyReader;
 import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyReader;
 import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
@@ -507,6 +503,9 @@ public class TestRangeFacetCounts extend
       int numRange = _TestUtil.nextInt(random(), 1, 5);
       DoubleRange[] ranges = new DoubleRange[numRange];
       int[] expectedCounts = new int[numRange];
+      if (VERBOSE) {
+        System.out.println("TEST: " + numRange + " ranges");
+      }
       for(int rangeID=0;rangeID<numRange;rangeID++) {
         double min;
         if (rangeID > 0 && random().nextInt(10) == 7) {
@@ -539,6 +538,12 @@ public class TestRangeFacetCounts extend
           max = x;
         }
 
+        // Must truncate to float precision so that the
+        // drill-down counts (which use NRQ.newFloatRange)
+        // are correct:
+        min = (float) min;
+        max = (float) max;
+
         boolean minIncl;
         boolean maxIncl;
         if (min == max) {
@@ -550,6 +555,10 @@ public class TestRangeFacetCounts extend
         }
         ranges[rangeID] = new DoubleRange("r" + rangeID, min, minIncl, max, maxIncl);
 
+        if (VERBOSE) {
+          System.out.println("TEST:   range " + rangeID + ": " + ranges[rangeID]);
+        }
+
         // Do "slow but hopefully correct" computation of
         // expected count:
         for(int i=0;i<numDocs;i++) {
@@ -564,6 +573,9 @@ public class TestRangeFacetCounts extend
           } else {
             accept &= values[i] < max;
           }
+          if (VERBOSE) {
+            System.out.println("TEST:   check doc=" + i + " val=" + values[i] + " accept=" + accept);
+          }
           if (accept) {
             expectedCounts[rangeID]++;
           }
@@ -577,7 +589,7 @@ public class TestRangeFacetCounts extend
       assertEquals(numRange, result.labelValues.length);
       for(int rangeID=0;rangeID<numRange;rangeID++) {
         if (VERBOSE) {
-          System.out.println("  range " + rangeID + " expectedCount=" + expectedCounts[rangeID]);
+          System.out.println("TEST: verify range " + rangeID + " expectedCount=" + expectedCounts[rangeID]);
         }
         LabelAndValue subNode = result.labelValues[rangeID];
         assertEquals("r" + rangeID, subNode.label);

Modified: lucene/dev/branches/lucene5376/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestFacetLabel.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestFacetLabel.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestFacetLabel.java (original)
+++ lucene/dev/branches/lucene5376/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestFacetLabel.java Fri Jan 17 17:23:33 2014
@@ -2,7 +2,10 @@ package org.apache.lucene.facet.taxonomy
 
 import java.util.Arrays;
 
+import org.apache.lucene.facet.FacetField;
 import org.apache.lucene.facet.FacetTestCase;
+import org.apache.lucene.facet.sortedset.SortedSetDocValuesFacetField;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util._TestUtil;
 import org.junit.Test;
 
@@ -158,8 +161,104 @@ public class TestFacetLabel extends Face
         assertNotNull(new FacetLabel(components));
         fail("empty or null components should not be allowed: " + Arrays.toString(components));
       } catch (IllegalArgumentException e) {
-        // ok
+        // expected
       }
+      try {
+        new FacetField("dim", components);
+        fail("empty or null components should not be allowed: " + Arrays.toString(components));
+      } catch (IllegalArgumentException e) {
+        // expected
+      }
+      try {
+        new AssociationFacetField(new BytesRef(), "dim", components);
+        fail("empty or null components should not be allowed: " + Arrays.toString(components));
+      } catch (IllegalArgumentException e) {
+        // expected
+      }
+      try {
+        new IntAssociationFacetField(17, "dim", components);
+        fail("empty or null components should not be allowed: " + Arrays.toString(components));
+      } catch (IllegalArgumentException e) {
+        // expected
+      }
+      try {
+        new FloatAssociationFacetField(17.0f, "dim", components);
+        fail("empty or null components should not be allowed: " + Arrays.toString(components));
+      } catch (IllegalArgumentException e) {
+        // expected
+      }
+    }
+    try {
+      new FacetField(null, new String[] {"abc"});
+      fail("empty or null components should not be allowed");
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+    try {
+      new FacetField("", new String[] {"abc"});
+      fail("empty or null components should not be allowed");
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+    try {
+      new IntAssociationFacetField(17, null, new String[] {"abc"});
+      fail("empty or null components should not be allowed");
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+    try {
+      new IntAssociationFacetField(17, "", new String[] {"abc"});
+      fail("empty or null components should not be allowed");
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+    try {
+      new FloatAssociationFacetField(17.0f, null, new String[] {"abc"});
+      fail("empty or null components should not be allowed");
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+    try {
+      new FloatAssociationFacetField(17.0f, "", new String[] {"abc"});
+      fail("empty or null components should not be allowed");
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+    try {
+      new AssociationFacetField(new BytesRef(), null, new String[] {"abc"});
+      fail("empty or null components should not be allowed");
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+    try {
+      new AssociationFacetField(new BytesRef(), "", new String[] {"abc"});
+      fail("empty or null components should not be allowed");
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+    try {
+      new SortedSetDocValuesFacetField(null, "abc");
+      fail("empty or null components should not be allowed");
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+    try {
+      new SortedSetDocValuesFacetField("", "abc");
+      fail("empty or null components should not be allowed");
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+    try {
+      new SortedSetDocValuesFacetField("dim", null);
+      fail("empty or null components should not be allowed");
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+    try {
+      new SortedSetDocValuesFacetField("dim", "");
+      fail("empty or null components should not be allowed");
+    } catch (IllegalArgumentException e) {
+      // expected
     }
   }
 

Modified: lucene/dev/branches/lucene5376/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetCounts.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetCounts.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetCounts.java (original)
+++ lucene/dev/branches/lucene5376/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetCounts.java Fri Jan 17 17:23:33 2014
@@ -30,8 +30,6 @@ import org.apache.lucene.analysis.MockAn
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.StringField;
-import org.apache.lucene.facet.CachedOrdinalsReader;
-import org.apache.lucene.facet.DocValuesOrdinalsReader;
 import org.apache.lucene.facet.DrillDownQuery;
 import org.apache.lucene.facet.FacetField;
 import org.apache.lucene.facet.FacetResult;
@@ -40,7 +38,6 @@ import org.apache.lucene.facet.Facets;
 import org.apache.lucene.facet.FacetsCollector;
 import org.apache.lucene.facet.FacetsConfig;
 import org.apache.lucene.facet.LabelAndValue;
-import org.apache.lucene.facet.OrdinalsReader;
 import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyReader;
 import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
 import org.apache.lucene.index.DirectoryReader;

Modified: lucene/dev/branches/lucene5376/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetSumValueSource.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetSumValueSource.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetSumValueSource.java (original)
+++ lucene/dev/branches/lucene5376/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetSumValueSource.java Fri Jan 17 17:23:33 2014
@@ -30,7 +30,6 @@ import org.apache.lucene.document.FloatD
 import org.apache.lucene.document.IntField;
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.document.StringField;
-import org.apache.lucene.facet.DocValuesOrdinalsReader;
 import org.apache.lucene.facet.FacetField;
 import org.apache.lucene.facet.FacetResult;
 import org.apache.lucene.facet.FacetTestCase;

Modified: lucene/dev/branches/lucene5376/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java (original)
+++ lucene/dev/branches/lucene5376/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java Fri Jan 17 17:23:33 2014
@@ -20,11 +20,16 @@ package org.apache.lucene.search.highlig
  * limitations under the License.
  */
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Comparator;
+
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.Fields;
@@ -35,10 +40,6 @@ import org.apache.lucene.index.TermsEnum
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.BytesRef;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Comparator;
-
 /**
  * Hides implementation issues associated with obtaining a TokenStream for use
  * with the highlighter - can obtain from TermFreqVectors with offsets and
@@ -169,11 +170,14 @@ public class TokenSources {
 
       PositionIncrementAttribute posincAtt;
 
+      PayloadAttribute payloadAtt;
+
       StoredTokenStream(Token tokens[]) {
         this.tokens = tokens;
         termAtt = addAttribute(CharTermAttribute.class);
         offsetAtt = addAttribute(OffsetAttribute.class);
         posincAtt = addAttribute(PositionIncrementAttribute.class);
+        payloadAtt = addAttribute(PayloadAttribute.class);
       }
 
       @Override
@@ -185,6 +189,10 @@ public class TokenSources {
         clearAttributes();
         termAtt.setEmpty().append(token);
         offsetAtt.setOffset(token.startOffset(), token.endOffset());
+        BytesRef payload = token.getPayload();
+        if (payload != null) {
+          payloadAtt.setPayload(payload);
+        }
         posincAtt
             .setPositionIncrement(currentToken <= 1
                 || tokens[currentToken - 1].startOffset() > tokens[currentToken - 2]
@@ -192,6 +200,9 @@ public class TokenSources {
         return true;
       }
     }
+
+    boolean hasPayloads = tpv.hasPayloads();
+
     // code to reconstruct the original sequence of Tokens
     TermsEnum termsEnum = tpv.iterator(null);
     int totalTokens = 0;
@@ -223,6 +234,13 @@ public class TokenSources {
         final Token token = new Token(term,
                                       dpEnum.startOffset(),
                                       dpEnum.endOffset());
+        if (hasPayloads) {
+          // Must make a deep copy of the returned payload,
+          // since D&PEnum API is allowed to re-use on every
+          // call:
+          token.setPayload(BytesRef.deepCopyOf(dpEnum.getPayload()));
+        }
+
         if (tokenPositionsGuaranteedContiguous && pos != -1) {
           // We have positions stored and a guarantee that the token position
           // information is contiguous
@@ -253,9 +271,11 @@ public class TokenSources {
       ArrayUtil.timSort(tokensInOriginalOrder, new Comparator<Token>() {
         @Override
         public int compare(Token t1, Token t2) {
-          if (t1.startOffset() == t2.startOffset()) return t1.endOffset()
-              - t2.endOffset();
-          else return t1.startOffset() - t2.startOffset();
+          if (t1.startOffset() == t2.startOffset()) {
+            return t1.endOffset() - t2.endOffset();
+          } else {
+            return t1.startOffset() - t2.startOffset();
+          }
         }
       });
     }

Modified: lucene/dev/branches/lucene5376/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermPositionVector.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermPositionVector.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermPositionVector.java (original)
+++ lucene/dev/branches/lucene5376/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermPositionVector.java Fri Jan 17 17:23:33 2014
@@ -26,6 +26,7 @@ import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.Terms;
@@ -48,6 +49,8 @@ public final class TokenStreamFromTermPo
 
   private OffsetAttribute offsetAttribute;
 
+  private PayloadAttribute payloadAttribute;
+
   /**
    * Constructor.
    * 
@@ -59,7 +62,9 @@ public final class TokenStreamFromTermPo
     termAttribute = addAttribute(CharTermAttribute.class);
     positionIncrementAttribute = addAttribute(PositionIncrementAttribute.class);
     offsetAttribute = addAttribute(OffsetAttribute.class);
+    payloadAttribute = addAttribute(PayloadAttribute.class);
     final boolean hasOffsets = vector.hasOffsets();
+    final boolean hasPayloads = vector.hasPayloads();
     final TermsEnum termsEnum = vector.iterator(null);
     BytesRef text;
     DocsAndPositionsEnum dpEnum = null;
@@ -79,6 +84,13 @@ public final class TokenStreamFromTermPo
           token = new Token();
           token.setEmpty().append(text.utf8ToString());
         }
+        if (hasPayloads) {
+          // Must make a deep copy of the returned payload,
+          // since D&PEnum API is allowed to re-use on every
+          // call:
+          token.setPayload(BytesRef.deepCopyOf(dpEnum.getPayload()));
+        }
+
         // Yes - this is the position, not the increment! This is for
         // sorting. This value
         // will be corrected before use.
@@ -112,6 +124,7 @@ public final class TokenStreamFromTermPo
       positionIncrementAttribute.setPositionIncrement(next
           .getPositionIncrement());
       offsetAttribute.setOffset(next.startOffset(), next.endOffset());
+      payloadAttribute.setPayload(next.getPayload());
       return true;
     }
     return false;

Modified: lucene/dev/branches/lucene5376/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldQuery.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldQuery.java (original)
+++ lucene/dev/branches/lucene5376/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldQuery.java Fri Jan 17 17:23:33 2014
@@ -91,14 +91,15 @@ public class FieldQuery {
   void flatten( Query sourceQuery, IndexReader reader, Collection<Query> flatQueries ) throws IOException{
     if( sourceQuery instanceof BooleanQuery ){
       BooleanQuery bq = (BooleanQuery)sourceQuery;
-      for( BooleanClause clause : bq.getClauses() ){
-        if( !clause.isProhibited() )
-          flatten( clause.getQuery(), reader, flatQueries );
+      for( BooleanClause clause : bq ) {
+        if( !clause.isProhibited() ) {
+          flatten( applyParentBoost( clause.getQuery(), bq ), reader, flatQueries );
+        }
       }
     } else if( sourceQuery instanceof DisjunctionMaxQuery ){
       DisjunctionMaxQuery dmq = (DisjunctionMaxQuery)sourceQuery;
       for( Query query : dmq ){
-        flatten( query, reader, flatQueries );
+        flatten( applyParentBoost( query, dmq ), reader, flatQueries );
       }
     }
     else if( sourceQuery instanceof TermQuery ){
@@ -111,18 +112,20 @@ public class FieldQuery {
         if( pq.getTerms().length > 1 )
           flatQueries.add( pq );
         else if( pq.getTerms().length == 1 ){
-          flatQueries.add( new TermQuery( pq.getTerms()[0] ) );
+          Query flat = new TermQuery( pq.getTerms()[0] );
+          flat.setBoost( pq.getBoost() );
+          flatQueries.add( flat );
         }
       }
     } else if (sourceQuery instanceof ConstantScoreQuery) {
       final Query q = ((ConstantScoreQuery) sourceQuery).getQuery();
       if (q != null) {
-        flatten(q, reader, flatQueries);
+        flatten( applyParentBoost( q, sourceQuery ), reader, flatQueries);
       }
     } else if (sourceQuery instanceof FilteredQuery) {
       final Query q = ((FilteredQuery) sourceQuery).getQuery();
       if (q != null) {
-        flatten(q, reader, flatQueries);
+        flatten( applyParentBoost( q, sourceQuery ), reader, flatQueries);
       }
     } else if (reader != null){
       Query query = sourceQuery;
@@ -142,6 +145,18 @@ public class FieldQuery {
     }
     // else discard queries
   }
+
+  /**
+   * Push parent's boost into a clone of query if parent has a non-1 boost.
+   */
+  protected Query applyParentBoost( Query query, Query parent ) {
+    if ( parent.getBoost() == 1 ) {
+      return query;
+    }
+    Query cloned = query.clone();
+    cloned.setBoost( query.getBoost() * parent.getBoost() );
+    return cloned;
+  }
   
   /*
    * Create expandQueries from flatQueries.

Modified: lucene/dev/branches/lucene5376/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (original)
+++ lucene/dev/branches/lucene5376/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java Fri Jan 17 17:23:33 2014
@@ -1954,8 +1954,8 @@ final class SynonymAnalyzer extends Anal
    *      java.io.Reader)
    */
   @Override
-  public TokenStreamComponents createComponents(String arg0, Reader arg1) {
-    Tokenizer stream = new MockTokenizer(arg1, MockTokenizer.SIMPLE, true);
+  public TokenStreamComponents createComponents(String arg0) {
+    Tokenizer stream = new MockTokenizer(MockTokenizer.SIMPLE, true);
     stream.addAttribute(CharTermAttribute.class);
     stream.addAttribute(PositionIncrementAttribute.class);
     stream.addAttribute(OffsetAttribute.class);

Modified: lucene/dev/branches/lucene5376/lucene/highlighter/src/test/org/apache/lucene/search/highlight/OffsetLimitTokenFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/highlighter/src/test/org/apache/lucene/search/highlight/OffsetLimitTokenFilterTest.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/highlighter/src/test/org/apache/lucene/search/highlight/OffsetLimitTokenFilterTest.java (original)
+++ lucene/dev/branches/lucene5376/lucene/highlighter/src/test/org/apache/lucene/search/highlight/OffsetLimitTokenFilterTest.java Fri Jan 17 17:23:33 2014
@@ -27,23 +27,21 @@ public class OffsetLimitTokenFilterTest 
   public void testFilter() throws Exception {
     // we disable MockTokenizer checks because we will forcefully limit the 
     // tokenstream and call end() before incrementToken() returns false.
-    MockTokenizer stream = new MockTokenizer(new StringReader(
-        "short toolong evenmuchlongertext a ab toolong foo"),
+    MockTokenizer stream = new MockTokenizer(
         MockTokenizer.WHITESPACE, false);
+    stream.setReader(new StringReader("short toolong evenmuchlongertext a ab toolong foo"));
     stream.setEnableChecks(false);
     OffsetLimitTokenFilter filter = new OffsetLimitTokenFilter(stream, 10);
     assertTokenStreamContents(filter, new String[] {"short", "toolong"});
     
-    stream = new MockTokenizer(new StringReader(
-    "short toolong evenmuchlongertext a ab toolong foo"),
-    MockTokenizer.WHITESPACE, false);
+    stream = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+    stream.setReader(new StringReader("short toolong evenmuchlongertext a ab toolong foo"));
     stream.setEnableChecks(false);
     filter = new OffsetLimitTokenFilter(stream, 12);
     assertTokenStreamContents(filter, new String[] {"short", "toolong"});
     
-    stream = new MockTokenizer(new StringReader(
-        "short toolong evenmuchlongertext a ab toolong foo"),
-        MockTokenizer.WHITESPACE, false);
+    stream = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+    stream.setReader(new StringReader("short toolong evenmuchlongertext a ab toolong foo"));
     stream.setEnableChecks(false);
     filter = new OffsetLimitTokenFilter(stream, 30);
     assertTokenStreamContents(filter, new String[] {"short", "toolong",
@@ -52,8 +50,8 @@ public class OffsetLimitTokenFilterTest 
     checkOneTerm(new Analyzer() {
       
       @Override
-      public TokenStreamComponents createComponents(String fieldName, Reader reader) {
-        MockTokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
+      public TokenStreamComponents createComponents(String fieldName) {
+        MockTokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
         tokenizer.setEnableChecks(false);
         return new TokenStreamComponents(tokenizer, new OffsetLimitTokenFilter(tokenizer, 10));
       }

Modified: lucene/dev/branches/lucene5376/lucene/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java (original)
+++ lucene/dev/branches/lucene5376/lucene/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java Fri Jan 17 17:23:33 2014
@@ -17,10 +17,14 @@ package org.apache.lucene.search.highlig
  * limitations under the License.
  */
 
+import java.io.IOException;
+
+import org.apache.lucene.analysis.CannedTokenStream;
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
@@ -29,6 +33,7 @@ import org.apache.lucene.document.TextFi
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.DisjunctionMaxQuery;
 import org.apache.lucene.search.IndexSearcher;
@@ -38,10 +43,9 @@ import org.apache.lucene.search.spans.Sp
 import org.apache.lucene.search.spans.SpanQuery;
 import org.apache.lucene.search.spans.SpanTermQuery;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 
-import java.io.IOException;
-
 // LUCENE-2874
 public class TokenSourcesTest extends LuceneTestCase {
   private static final String FIELD = "text";
@@ -262,7 +266,6 @@ public class TokenSourcesTest extends Lu
 
   public void testTermVectorWithoutOffsetsThrowsException()
       throws IOException, InvalidTokenOffsetsException {
-    final String TEXT = "the fox did not jump";
     final Directory directory = newDirectory();
     final IndexWriter indexWriter = new IndexWriter(directory,
         newIndexWriterConfig(TEST_VERSION_CURRENT, null));
@@ -280,8 +283,7 @@ public class TokenSourcesTest extends Lu
     final IndexReader indexReader = DirectoryReader.open(directory);
     try {
       assertEquals(1, indexReader.numDocs());
-      final TokenStream tokenStream = TokenSources
-          .getTokenStream(
+      TokenSources.getTokenStream(
               indexReader.getTermVector(0, FIELD),
               false);
       fail("TokenSources.getTokenStream should throw IllegalArgumentException if term vector has no offsets");
@@ -295,5 +297,68 @@ public class TokenSourcesTest extends Lu
     }
   }
 
+  int curOffset;
 
+  /** Just make a token with the text, and set the payload
+   *  to the text as well.  Offsets increment "naturally". */
+  private Token getToken(String text) {
+    Token t = new Token(text, curOffset, curOffset+text.length());
+    t.setPayload(new BytesRef(text));
+    curOffset++;
+    return t;
+  }
+
+  // LUCENE-5294
+  public void testPayloads() throws Exception {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+    FieldType myFieldType = new FieldType(TextField.TYPE_NOT_STORED);
+    myFieldType.setStoreTermVectors(true);
+    myFieldType.setStoreTermVectorOffsets(true);
+    myFieldType.setStoreTermVectorPositions(true);
+    myFieldType.setStoreTermVectorPayloads(true);
+
+    curOffset = 0;
+
+    Token[] tokens = new Token[] {
+      getToken("foxes"),
+      getToken("can"),
+      getToken("jump"),
+      getToken("high")
+    };
+
+    Document doc = new Document();
+    doc.add(new Field("field", new CannedTokenStream(tokens), myFieldType));
+    writer.addDocument(doc);
+  
+    IndexReader reader = writer.getReader();
+    writer.close();
+    assertEquals(1, reader.numDocs());
+
+    for(int i=0;i<2;i++) {
+      // Do this twice, once passing true and then passing
+      // false: they are entirely different code paths
+      // under-the-hood:
+      TokenStream ts = TokenSources.getTokenStream(reader.getTermVectors(0).terms("field"), i == 0);
+
+      CharTermAttribute termAtt = ts.getAttribute(CharTermAttribute.class);
+      PositionIncrementAttribute posIncAtt = ts.getAttribute(PositionIncrementAttribute.class);
+      OffsetAttribute offsetAtt = ts.getAttribute(OffsetAttribute.class);
+      PayloadAttribute payloadAtt = ts.getAttribute(PayloadAttribute.class);
+
+      for(Token token : tokens) {
+        assertTrue(ts.incrementToken());
+        assertEquals(token.toString(), termAtt.toString());
+        assertEquals(token.getPositionIncrement(), posIncAtt.getPositionIncrement());
+        assertEquals(token.getPayload(), payloadAtt.getPayload());
+        assertEquals(token.startOffset(), offsetAtt.startOffset());
+        assertEquals(token.endOffset(), offsetAtt.endOffset());
+      }
+
+      assertFalse(ts.incrementToken());
+    }
+
+    reader.close();
+    dir.close();
+  }
 }

Modified: lucene/dev/branches/lucene5376/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java (original)
+++ lucene/dev/branches/lucene5376/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java Fri Jan 17 17:23:33 2014
@@ -198,8 +198,8 @@ public abstract class AbstractTestCase e
 
   static final class BigramAnalyzer extends Analyzer {
     @Override
-    public TokenStreamComponents createComponents(String fieldName, Reader reader) {
-      return new TokenStreamComponents(new BasicNGramTokenizer(reader));
+    public TokenStreamComponents createComponents(String fieldName) {
+      return new TokenStreamComponents(new BasicNGramTokenizer());
     }
   }
   
@@ -221,20 +221,20 @@ public abstract class AbstractTestCase e
     private int charBufferIndex;
     private int charBufferLen;
     
-    public BasicNGramTokenizer( Reader in ){
-      this( in, DEFAULT_N_SIZE );
+    public BasicNGramTokenizer( ){
+      this( DEFAULT_N_SIZE );
     }
     
-    public BasicNGramTokenizer( Reader in, int n ){
-      this( in, n, DEFAULT_DELIMITERS );
+    public BasicNGramTokenizer( int n ){
+      this( n, DEFAULT_DELIMITERS );
     }
     
-    public BasicNGramTokenizer( Reader in, String delimiters ){
-      this( in, DEFAULT_N_SIZE, delimiters );
+    public BasicNGramTokenizer( String delimiters ){
+      this( DEFAULT_N_SIZE, delimiters );
     }
     
-    public BasicNGramTokenizer( Reader in, int n, String delimiters ){
-      super(in);
+    public BasicNGramTokenizer(int n, String delimiters ){
+      super();
       this.n = n;
       this.delimiters = delimiters;
       startTerm = 0;

Modified: lucene/dev/branches/lucene5376/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FastVectorHighlighterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FastVectorHighlighterTest.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FastVectorHighlighterTest.java (original)
+++ lucene/dev/branches/lucene5376/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FastVectorHighlighterTest.java Fri Jan 17 17:23:33 2014
@@ -257,8 +257,53 @@ public class FastVectorHighlighterTest e
     writer.close();
     dir.close();
   }
-  
-  public void testCommonTermsQueryHighlightTest() throws IOException {
+
+  public void testBoostedPhraseHighlightTest() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter( dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer( random() ) ) );
+    Document doc = new Document();
+    FieldType type = new FieldType( TextField.TYPE_STORED  );
+    type.setStoreTermVectorOffsets( true );
+    type.setStoreTermVectorPositions( true );
+    type.setStoreTermVectors( true );
+    type.freeze();
+    StringBuilder text = new StringBuilder();
+    text.append("words words junk junk junk junk junk junk junk junk highlight junk junk junk junk together junk ");
+    for ( int i = 0; i<10; i++ ) {
+      text.append("junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk ");
+    }
+    text.append("highlight words together ");
+    for ( int i = 0; i<10; i++ ) {
+      text.append("junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk ");
+    }
+    doc.add( new Field( "text", text.toString().trim(), type ) );
+    writer.addDocument(doc);
+    FastVectorHighlighter highlighter = new FastVectorHighlighter();
+    IndexReader reader = DirectoryReader.open(writer, true);
+
+    // This mimics what some query parsers do to <highlight words together>
+    BooleanQuery terms = new BooleanQuery();
+    terms.add( clause( "text", "highlight" ), Occur.MUST );
+    terms.add( clause( "text", "words" ), Occur.MUST );
+    terms.add( clause( "text", "together" ), Occur.MUST );
+    // This mimics what some query parsers do to <"highlight words together">
+    BooleanQuery phrase = new BooleanQuery();
+    phrase.add( clause( "text", "highlight", "words", "together" ), Occur.MUST );
+    phrase.setBoost( 100 );
+    // Now combine those results in a boolean query which should pull the phrases to the front of the list of fragments 
+    BooleanQuery query = new BooleanQuery();
+    query.add( terms, Occur.MUST );
+    query.add( phrase, Occur.SHOULD );
+    FieldQuery fieldQuery = new FieldQuery( query, reader, true, false );
+    String fragment = highlighter.getBestFragment( fieldQuery, reader, 0, "text", 100 );
+    assertEquals( "junk junk junk junk junk junk junk junk <b>highlight words together</b> junk junk junk junk junk junk junk junk", fragment );
+
+    reader.close();
+    writer.close();
+    dir.close();
+  }
+
+  public void testCommonTermsQueryHighlight() throws IOException {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT,  new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET)));
     FieldType type = new FieldType(TextField.TYPE_STORED);

Modified: lucene/dev/branches/lucene5376/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FieldQueryTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FieldQueryTest.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FieldQueryTest.java (original)
+++ lucene/dev/branches/lucene5376/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FieldQueryTest.java Fri Jan 17 17:23:33 2014
@@ -44,44 +44,55 @@ import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 
 public class FieldQueryTest extends AbstractTestCase {
+  private float boost;
+
+  /**
+   * Set boost to a random value each time it is called.
+   */
+  private void initBoost() {
+    boost = usually() ? 1F : random().nextFloat() * 10000;
+  }
 
   public void testFlattenBoolean() throws Exception {
+    initBoost();
     BooleanQuery booleanQuery = new BooleanQuery();
-    booleanQuery.add(new TermQuery(new Term(F, "A")), Occur.MUST);
-    booleanQuery.add(new TermQuery(new Term(F, "B")), Occur.MUST);
-    booleanQuery.add(new TermQuery(new Term(F, "C")), Occur.SHOULD);
+    booleanQuery.setBoost( boost );
+    booleanQuery.add(tq("A"), Occur.MUST);
+    booleanQuery.add(tq("B"), Occur.MUST);
+    booleanQuery.add(tq("C"), Occur.SHOULD);
 
     BooleanQuery innerQuery = new BooleanQuery();
-    innerQuery.add(new TermQuery(new Term(F, "D")), Occur.MUST);
-    innerQuery.add(new TermQuery(new Term(F, "E")), Occur.MUST);
+    innerQuery.add(tq("D"), Occur.MUST);
+    innerQuery.add(tq("E"), Occur.MUST);
     booleanQuery.add(innerQuery, Occur.MUST_NOT);
 
     FieldQuery fq = new FieldQuery(booleanQuery, true, true );
     Set<Query> flatQueries = new HashSet<Query>();
     fq.flatten(booleanQuery, reader, flatQueries);
-    assertCollectionQueries( flatQueries, tq( "A" ), tq( "B" ), tq( "C" ) );
+    assertCollectionQueries( flatQueries, tq( boost, "A" ), tq( boost, "B" ), tq( boost, "C" ) );
   }
 
   public void testFlattenDisjunctionMaxQuery() throws Exception {
+    initBoost();
     Query query = dmq( tq( "A" ), tq( "B" ), pqF( "C", "D" ) );
+    query.setBoost( boost );
     FieldQuery fq = new FieldQuery( query, true, true );
     Set<Query> flatQueries = new HashSet<Query>();
     fq.flatten( query, reader, flatQueries );
-    assertCollectionQueries( flatQueries, tq( "A" ), tq( "B" ), pqF( "C", "D" ) );
+    assertCollectionQueries( flatQueries, tq( boost, "A" ), tq( boost, "B" ), pqF( boost, "C", "D" ) );
   }
 
   public void testFlattenTermAndPhrase() throws Exception {
+    initBoost();
     BooleanQuery booleanQuery = new BooleanQuery();
-    booleanQuery.add(new TermQuery(new Term(F, "A")), Occur.MUST);
-    PhraseQuery phraseQuery = new PhraseQuery();
-    phraseQuery.add(new Term(F, "B"));
-    phraseQuery.add(new Term(F, "C"));
-    booleanQuery.add(phraseQuery, Occur.MUST);
+    booleanQuery.setBoost( boost );
+    booleanQuery.add(tq("A"), Occur.MUST);
+    booleanQuery.add(pqF("B", "C"), Occur.MUST);
 
     FieldQuery fq = new FieldQuery(booleanQuery, true, true );
     Set<Query> flatQueries = new HashSet<Query>();
     fq.flatten(booleanQuery, reader, flatQueries);
-    assertCollectionQueries( flatQueries, tq( "A" ), pqF( "B", "C" ) );
+    assertCollectionQueries( flatQueries, tq( boost, "A" ), pqF( boost, "B", "C" ) );
   }
 
   public void testFlattenTermAndPhrase2gram() throws Exception {
@@ -926,6 +937,7 @@ public class FieldQueryTest extends Abst
   }
   
   public void testFlattenFilteredQuery() throws Exception {
+    initBoost();
     Query query = new FilteredQuery(pqF( "A" ), new Filter() {
       @Override
       public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs)
@@ -933,18 +945,21 @@ public class FieldQueryTest extends Abst
         return null;
       }
     });
+    query.setBoost(boost);
     FieldQuery fq = new FieldQuery( query, true, true );
     Set<Query> flatQueries = new HashSet<Query>();
     fq.flatten( query, reader, flatQueries );
-    assertCollectionQueries( flatQueries, tq( "A" ) );
+    assertCollectionQueries( flatQueries, tq( boost, "A" ) );
   }
   
   public void testFlattenConstantScoreQuery() throws Exception {
+    initBoost();
     Query query = new ConstantScoreQuery(pqF( "A" ));
+    query.setBoost(boost);
     FieldQuery fq = new FieldQuery( query, true, true );
     Set<Query> flatQueries = new HashSet<Query>();
     fq.flatten( query, reader, flatQueries );
-    assertCollectionQueries( flatQueries, tq( "A" ) );
+    assertCollectionQueries( flatQueries, tq( boost, "A" ) );
   }
   
 }

Modified: lucene/dev/branches/lucene5376/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/IndexTimeSynonymTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/IndexTimeSynonymTest.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/IndexTimeSynonymTest.java (original)
+++ lucene/dev/branches/lucene5376/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/IndexTimeSynonymTest.java Fri Jan 17 17:23:33 2014
@@ -297,8 +297,8 @@ public class IndexTimeSynonymTest extend
     }
     
     @Override
-    public TokenStreamComponents createComponents(String fieldName, Reader reader) {
-      Tokenizer ts = new Tokenizer(Token.TOKEN_ATTRIBUTE_FACTORY, reader) {
+    public TokenStreamComponents createComponents(String fieldName) {
+      Tokenizer ts = new Tokenizer(Token.TOKEN_ATTRIBUTE_FACTORY) {
         final AttributeImpl reusableToken = (AttributeImpl) addAttribute(CharTermAttribute.class);
         int p = 0;
         

Modified: lucene/dev/branches/lucene5376/lucene/join/src/java/org/apache/lucene/search/join/ToChildBlockJoinQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/join/src/java/org/apache/lucene/search/join/ToChildBlockJoinQuery.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/join/src/java/org/apache/lucene/search/join/ToChildBlockJoinQuery.java (original)
+++ lucene/dev/branches/lucene5376/lucene/join/src/java/org/apache/lucene/search/join/ToChildBlockJoinQuery.java Fri Jan 17 17:23:33 2014
@@ -24,7 +24,6 @@ import java.util.Set;
 
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;       // javadocs
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.DocIdSet;
 import org.apache.lucene.search.Explanation;
@@ -32,7 +31,6 @@ import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.Scorer.ChildScorer;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.FixedBitSet;
@@ -48,6 +46,11 @@ import org.apache.lucene.util.FixedBitSe
 
 public class ToChildBlockJoinQuery extends Query {
 
+  /** Message thrown from {@link
+   *  ToChildBlockJoinScorer#validateParentDoc} on mis-use,
+   *  when the parent query incorrectly returns child docs. */
+  static final String INVALID_QUERY_MESSAGE = "Parent query yields document which is not matched by parents filter, docID=";
+
   private final Filter parentsFilter;
   private final Query parentQuery;
 
@@ -203,6 +206,7 @@ public class ToChildBlockJoinQuery exten
           // children:
           while (true) {
             parentDoc = parentScorer.nextDoc();
+            validateParentDoc();
 
             if (parentDoc == 0) {
               // Degenerate but allowed: parent has no children
@@ -211,6 +215,7 @@ public class ToChildBlockJoinQuery exten
               // tricky because scorer must return -1 for
               // .doc() on init...
               parentDoc = parentScorer.nextDoc();
+              validateParentDoc();
             }
 
             if (parentDoc == NO_MORE_DOCS) {
@@ -248,6 +253,14 @@ public class ToChildBlockJoinQuery exten
       }
     }
 
+    /** Detect mis-use, where provided parent query in fact
+     *  sometimes returns child documents.  */
+    private void validateParentDoc() {
+      if (parentDoc != NO_MORE_DOCS && !parentBits.get(parentDoc)) {
+        throw new IllegalStateException(INVALID_QUERY_MESSAGE + parentDoc);
+      }
+    }
+
     @Override
     public int docID() {
       return childDoc;
@@ -277,6 +290,7 @@ public class ToChildBlockJoinQuery exten
       if (childDoc == -1 || childTarget > parentDoc) {
         // Advance to new parent:
         parentDoc = parentScorer.advance(childTarget);
+        validateParentDoc();
         //System.out.println("  advance to parentDoc=" + parentDoc);
         assert parentDoc > childTarget;
         if (parentDoc == NO_MORE_DOCS) {

Modified: lucene/dev/branches/lucene5376/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinFieldComparator.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinFieldComparator.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinFieldComparator.java (original)
+++ lucene/dev/branches/lucene5376/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinFieldComparator.java Fri Jan 17 17:23:33 2014
@@ -60,6 +60,11 @@ public abstract class ToParentBlockJoinF
   }
 
   @Override
+  public void setTopValue(Object value) {
+    wrappedComparator.setTopValue(value);
+  }
+
+  @Override
   public FieldComparator<Object> setNextReader(AtomicReaderContext context) throws IOException {
     DocIdSet innerDocuments = childFilter.getDocIdSet(context, null);
     if (isEmpty(innerDocuments)) {
@@ -193,7 +198,7 @@ public abstract class ToParentBlockJoinF
 
     @Override
     @SuppressWarnings("unchecked")
-    public int compareDocToValue(int parentDoc, Object value) throws IOException {
+    public int compareTop(int parentDoc) throws IOException {
       if (parentDoc == 0 || parentDocuments == null || childDocuments == null) {
         return 0;
       }
@@ -216,7 +221,7 @@ public abstract class ToParentBlockJoinF
         if (childDoc >= parentDoc || childDoc == -1) {
           return cmp;
         }
-        int cmp1 = wrappedComparator.compareDocToValue(childDoc, value);
+        int cmp1 = wrappedComparator.compareTop(childDoc);
         if (cmp1 > 0) {
           return cmp1;
         } else {
@@ -309,7 +314,7 @@ public abstract class ToParentBlockJoinF
 
     @Override
     @SuppressWarnings("unchecked")
-    public int compareDocToValue(int parentDoc, Object value) throws IOException {
+    public int compareTop(int parentDoc) throws IOException {
       if (parentDoc == 0 || parentDocuments == null || childDocuments == null) {
         return 0;
       }
@@ -330,7 +335,7 @@ public abstract class ToParentBlockJoinF
         if (childDoc >= parentDoc || childDoc == -1) {
           return cmp;
         }
-        int cmp1 = wrappedComparator.compareDocToValue(childDoc, value);
+        int cmp1 = wrappedComparator.compareTop(childDoc);
         if (cmp1 < 0) {
           return cmp1;
         } else {

Modified: lucene/dev/branches/lucene5376/lucene/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java (original)
+++ lucene/dev/branches/lucene5376/lucene/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java Fri Jan 17 17:23:33 2014
@@ -261,8 +261,8 @@ public class MemoryIndexTest extends Bas
       case 1: return new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET);
       case 2: return new Analyzer() {
         @Override
-        protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
-          Tokenizer tokenizer = new MockTokenizer(reader);
+        protected TokenStreamComponents createComponents(String fieldName) {
+          Tokenizer tokenizer = new MockTokenizer();
           return new TokenStreamComponents(tokenizer, new CrazyTokenFilter(tokenizer));
         }
       };

Modified: lucene/dev/branches/lucene5376/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSource.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSource.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSource.java (original)
+++ lucene/dev/branches/lucene5376/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSource.java Fri Jan 17 17:23:33 2014
@@ -131,6 +131,7 @@ public abstract class ValueSource {
     private FunctionValues docVals;
     private double bottom;
     private final Map fcontext;
+    private double topValue;
 
     ValueSourceComparator(Map fcontext, int numHits) {
       this.fcontext = fcontext;
@@ -164,15 +165,19 @@ public abstract class ValueSource {
     }
 
     @Override
+    public void setTopValue(final Double value) {
+      this.topValue = value.doubleValue();
+    }
+
+    @Override
     public Double value(int slot) {
       return values[slot];
     }
 
     @Override
-    public int compareDocToValue(int doc, Double valueObj) {
-      final double value = valueObj;
+    public int compareTop(int doc) {
       final double docValue = docVals.doubleVal(doc);
-      return Double.compare(docValue, value);
+      return Double.compare(topValue, docValue);
     }
   }
 }
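
The same migration in ValueSource's built-in comparator means function-value sorts take part in deep paging through the new API. A hypothetical usage sketch follows; the searcher, query, page size and the "price" field are assumptions, not taken from this patch.

import java.io.IOException;

import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.valuesource.DoubleFieldSource;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.TopDocs;

class SearchAfterSketch {
  /** Fetches the second page of a function-value sort (assumes page 1 has hits). */
  static TopDocs secondPage(IndexSearcher searcher, Query query) throws IOException {
    ValueSource vs = new DoubleFieldSource("price");   // assumed field
    Sort sort = new Sort(vs.getSortField(false));
    TopDocs page1 = searcher.search(query, 10, sort);
    ScoreDoc last = page1.scoreDocs[page1.scoreDocs.length - 1];
    // the "after" hit's sort value reaches ValueSourceComparator.setTopValue(),
    // and candidate documents are then compared against it via compareTop()
    return searcher.searchAfter(last, query, 10, sort);
  }
}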

Modified: lucene/dev/branches/lucene5376/lucene/queryparser/build.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/queryparser/build.xml?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/queryparser/build.xml (original)
+++ lucene/dev/branches/lucene5376/lucene/queryparser/build.xml Fri Jan 17 17:23:33 2014
@@ -46,6 +46,15 @@
 
   <target name="javacc" depends="javacc-QueryParser,javacc-surround,javacc-flexible"/>
   
+  <macrodef name="replaceStringBuffer">
+    <attribute name="dir"/>
+    <sequential>
+      <replace token="StringBuffer" value="StringBuilder" encoding="UTF-8">
+         <fileset dir="@{dir}" includes="ParseException.java TokenMgrError.java"/>
+      </replace>
+    </sequential>
+  </macrodef>
+  
   <target name="javacc-QueryParser" depends="resolve-javacc">
     <sequential>
       <invoke-javacc target="src/java/org/apache/lucene/queryparser/classic/QueryParser.jj"
@@ -60,7 +69,7 @@
 		     byline="true"
 		     match="public QueryParser\(QueryParserTokenManager "
 		     replace="protected QueryParser(QueryParserTokenManager "/>
-
+      <replaceStringBuffer dir="src/java/org/apache/lucene/queryparser/classic"/>
     </sequential>
   </target>
 
@@ -68,6 +77,7 @@
   	<invoke-javacc target="src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.jj"
                    outputDir="src/java/org/apache/lucene/queryparser/surround/parser"
     />
+    <replaceStringBuffer dir="src/java/org/apache/lucene/queryparser/surround/parser"/>
   </target>
 
   <target name="javacc-flexible" depends="resolve-javacc">
@@ -126,6 +136,7 @@ import org.apache.lucene.queryparser.fle
                              replace="  static private String add_escapes(String str) {"
                              flags="g"
                              byline="true"/>
+        <replaceStringBuffer dir="src/java/org/apache/lucene/queryparser/flexible/standard/parser"/>
   </target>
 
   <target name="resolve-javacc" xmlns:ivy="antlib:org.apache.ivy.ant">
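
A brief aside on what the new replaceStringBuffer macro does: javacc still emits StringBuffer in its generated ParseException and TokenMgrError sources, and StringBuilder is the API-compatible, unsynchronized replacement, so a plain token substitution over the generated files is sufficient. A trivial illustration (not from this patch) of the equivalence:

// StringBuilder mirrors StringBuffer's append()/toString() surface without
// per-call synchronization, which these single-threaded parsers never need.
StringBuilder retval = new StringBuilder();
retval.append("Lexical error at line ").append(42).append(", column ").append(7);
String message = retval.toString();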

Modified: lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/CharStream.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/CharStream.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/CharStream.java (original)
+++ lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/CharStream.java Fri Jan 17 17:23:33 2014
@@ -112,4 +112,4 @@ interface CharStream {
   void Done();
 
 }
-/* JavaCC - OriginalChecksum=30b94cad7b10d0d81e3a59a1083939d0 (do not edit this line) */
+/* JavaCC - OriginalChecksum=c847dd1920bf7901125a7244125682ad (do not edit this line) */

Modified: lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/ParseException.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/ParseException.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/ParseException.java (original)
+++ lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/ParseException.java Fri Jan 17 17:23:33 2014
@@ -89,7 +89,7 @@ public class ParseException extends Exce
                            int[][] expectedTokenSequences,
                            String[] tokenImage) {
     String eol = System.getProperty("line.separator", "\n");
-    StringBuffer expected = new StringBuffer();
+    StringBuilder expected = new StringBuilder();
     int maxSize = 0;
     for (int i = 0; i < expectedTokenSequences.length; i++) {
       if (maxSize < expectedTokenSequences[i].length) {
@@ -139,7 +139,7 @@ public class ParseException extends Exce
    * string literal.
    */
   static String add_escapes(String str) {
-      StringBuffer retval = new StringBuffer();
+      StringBuilder retval = new StringBuilder();
       char ch;
       for (int i = 0; i < str.length(); i++) {
         switch (str.charAt(i))
@@ -184,4 +184,4 @@ public class ParseException extends Exce
    }
 
 }
-/* JavaCC - OriginalChecksum=b187d97d5bb75c3fc63d642c1c26ac6e (do not edit this line) */
+/* JavaCC - OriginalChecksum=61602edcb3a15810cbc58f5593eba40d (do not edit this line) */

Modified: lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/Token.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/Token.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/Token.java (original)
+++ lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/Token.java Fri Jan 17 17:23:33 2014
@@ -128,4 +128,4 @@ public class Token implements java.io.Se
   }
 
 }
-/* JavaCC - OriginalChecksum=405bb5d2fcd84e94ac1c8f0b12c1f914 (do not edit this line) */
+/* JavaCC - OriginalChecksum=c1e1418b35aa9e47ef8dc98b87423d70 (do not edit this line) */

Modified: lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/TokenMgrError.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/TokenMgrError.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/TokenMgrError.java (original)
+++ lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/TokenMgrError.java Fri Jan 17 17:23:33 2014
@@ -48,7 +48,7 @@ public class TokenMgrError extends Error
    * equivalents in the given string
    */
   protected static final String addEscapes(String str) {
-    StringBuffer retval = new StringBuffer();
+    StringBuilder retval = new StringBuilder();
     char ch;
     for (int i = 0; i < str.length(); i++) {
       switch (str.charAt(i))
@@ -144,4 +144,4 @@ public class TokenMgrError extends Error
     this(LexicalError(EOFSeen, lexState, errorLine, errorColumn, errorAfter, curChar), reason);
   }
 }
-/* JavaCC - OriginalChecksum=f433e1a52b8eadbf12f3fbbbf87fd140 (do not edit this line) */
+/* JavaCC - OriginalChecksum=0c275864a1972d9a01601ab81426872d (do not edit this line) */

Modified: lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/CharStream.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/CharStream.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/CharStream.java (original)
+++ lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/CharStream.java Fri Jan 17 17:23:33 2014
@@ -112,4 +112,4 @@ interface CharStream {
   void Done();
 
 }
-/* JavaCC - OriginalChecksum=53b2ec7502d50e2290e86187a6c01270 (do not edit this line) */
+/* JavaCC - OriginalChecksum=c95f1720d9b38046dc5d294b741c44cb (do not edit this line) */

Modified: lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/ParseException.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/ParseException.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/ParseException.java (original)
+++ lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/ParseException.java Fri Jan 17 17:23:33 2014
@@ -92,7 +92,7 @@ public class ParseException extends Quer
                            int[][] expectedTokenSequences,
                            String[] tokenImage) {
     String eol = System.getProperty("line.separator", "\n");
-    StringBuffer expected = new StringBuffer();
+    StringBuilder expected = new StringBuilder();
     int maxSize = 0;
     for (int i = 0; i < expectedTokenSequences.length; i++) {
       if (maxSize < expectedTokenSequences[i].length) {
@@ -142,7 +142,7 @@ public class ParseException extends Quer
    * string literal.
    */
   static String add_escapes(String str) {
-      StringBuffer retval = new StringBuffer();
+      StringBuilder retval = new StringBuilder();
       char ch;
       for (int i = 0; i < str.length(); i++) {
         switch (str.charAt(i))
@@ -187,4 +187,4 @@ public class ParseException extends Quer
    }
 
 }
-/* JavaCC - OriginalChecksum=4263a02db9988d7a863aa97ad2f6dc67 (do not edit this line) */
+/* JavaCC - OriginalChecksum=81401c29cf6f9909761c636b4778ccc0 (do not edit this line) */

Modified: lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/Token.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/Token.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/Token.java (original)
+++ lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/Token.java Fri Jan 17 17:23:33 2014
@@ -128,4 +128,4 @@ public class Token implements java.io.Se
   }
 
 }
-/* JavaCC - OriginalChecksum=ea8b1e55950603be28e2f63dcd544ab4 (do not edit this line) */
+/* JavaCC - OriginalChecksum=30bbd23e0dec26f141130dc62a4f6e9d (do not edit this line) */

Modified: lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/TokenMgrError.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/TokenMgrError.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/TokenMgrError.java (original)
+++ lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/TokenMgrError.java Fri Jan 17 17:23:33 2014
@@ -48,7 +48,7 @@ public class TokenMgrError extends Error
    * equivalents in the given string
    */
   protected static final String addEscapes(String str) {
-    StringBuffer retval = new StringBuffer();
+    StringBuilder retval = new StringBuilder();
     char ch;
     for (int i = 0; i < str.length(); i++) {
       switch (str.charAt(i))
@@ -144,4 +144,4 @@ public class TokenMgrError extends Error
     this(LexicalError(EOFSeen, lexState, errorLine, errorColumn, errorAfter, curChar), reason);
   }
 }
-/* JavaCC - OriginalChecksum=be88283d82a985d82a34dda46bcf42d5 (do not edit this line) */
+/* JavaCC - OriginalChecksum=3ca7fbf7de9f2424b131a5499b0a78d0 (do not edit this line) */

Modified: lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AnalyzerQueryNodeProcessor.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AnalyzerQueryNodeProcessor.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AnalyzerQueryNodeProcessor.java (original)
+++ lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AnalyzerQueryNodeProcessor.java Fri Jan 17 17:23:33 2014
@@ -18,7 +18,9 @@ package org.apache.lucene.queryparser.fl
  */
 
 import java.io.IOException;
+
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.LinkedList;
 import java.util.List;
 
@@ -29,9 +31,12 @@ import org.apache.lucene.analysis.tokena
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
 import org.apache.lucene.queryparser.flexible.core.config.QueryConfigHandler;
+import org.apache.lucene.queryparser.flexible.core.nodes.BooleanQueryNode;
 import org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode;
 import org.apache.lucene.queryparser.flexible.core.nodes.FuzzyQueryNode;
 import org.apache.lucene.queryparser.flexible.core.nodes.GroupQueryNode;
+import org.apache.lucene.queryparser.flexible.core.nodes.ModifierQueryNode;
+import org.apache.lucene.queryparser.flexible.core.nodes.ModifierQueryNode.Modifier;
 import org.apache.lucene.queryparser.flexible.core.nodes.NoTokenFoundQueryNode;
 import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
 import org.apache.lucene.queryparser.flexible.core.nodes.QuotedFieldQueryNode;
@@ -40,6 +45,7 @@ import org.apache.lucene.queryparser.fle
 import org.apache.lucene.queryparser.flexible.core.nodes.TokenizedPhraseQueryNode;
 import org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessorImpl;
 import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler.ConfigurationKeys;
+import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler.Operator;
 import org.apache.lucene.queryparser.flexible.standard.nodes.MultiPhraseQueryNode;
 import org.apache.lucene.queryparser.flexible.standard.nodes.RegexpQueryNode;
 import org.apache.lucene.queryparser.flexible.standard.nodes.StandardBooleanQueryNode;
@@ -72,6 +78,8 @@ public class AnalyzerQueryNodeProcessor 
   private Analyzer analyzer;
 
   private boolean positionIncrementsEnabled;
+  
+  private Operator defaultOperator;
 
   public AnalyzerQueryNodeProcessor() {
     // empty constructor
@@ -85,7 +93,9 @@ public class AnalyzerQueryNodeProcessor 
       this.analyzer = analyzer;
       this.positionIncrementsEnabled = false;
       Boolean positionIncrementsEnabled = getQueryConfigHandler().get(ConfigurationKeys.ENABLE_POSITION_INCREMENTS);
-
+      Operator defaultOperator = getQueryConfigHandler().get(ConfigurationKeys.DEFAULT_OPERATOR);
+      this.defaultOperator = defaultOperator != null ? defaultOperator : Operator.OR;
+      
       if (positionIncrementsEnabled != null) {
           this.positionIncrementsEnabled = positionIncrementsEnabled;
       }
@@ -93,7 +103,6 @@ public class AnalyzerQueryNodeProcessor 
       if (this.analyzer != null) {
         return super.process(queryTree);
       }
-
     }
 
     return queryTree;
@@ -119,197 +128,250 @@ public class AnalyzerQueryNodeProcessor 
       int positionCount = 0;
       boolean severalTokensAtSamePosition = false;
       
-      try (TokenStream source = this.analyzer.tokenStream(field, text)) {
-        source.reset();
-        buffer = new CachingTokenFilter(source);
-
-        if (buffer.hasAttribute(PositionIncrementAttribute.class)) {
-          posIncrAtt = buffer.getAttribute(PositionIncrementAttribute.class);
-        }
-
-        try {
-
-          while (buffer.incrementToken()) {
-            numTokens++;
-            int positionIncrement = (posIncrAtt != null) ? posIncrAtt
-                .getPositionIncrement() : 1;
-            if (positionIncrement != 0) {
-              positionCount += positionIncrement;
-
-            } else {
-              severalTokensAtSamePosition = true;
+      try {
+        try (TokenStream source = this.analyzer.tokenStream(field, text)) {
+          source.reset();
+          buffer = new CachingTokenFilter(source);
+  
+          if (buffer.hasAttribute(PositionIncrementAttribute.class)) {
+            posIncrAtt = buffer.getAttribute(PositionIncrementAttribute.class);
+          }
+  
+          try {
+  
+            while (buffer.incrementToken()) {
+              numTokens++;
+              int positionIncrement = (posIncrAtt != null) ? posIncrAtt
+                  .getPositionIncrement() : 1;
+              if (positionIncrement != 0) {
+                positionCount += positionIncrement;
+  
+              } else {
+                severalTokensAtSamePosition = true;
+              }
+  
             }
-
+  
+          } catch (IOException e) {
+            // ignore
           }
-
         } catch (IOException e) {
-          // ignore
+          throw new RuntimeException(e);
         }
-      } catch (IOException e) {
-        throw new RuntimeException(e);
-      }
-      
-      // rewind the buffer stream
-      buffer.reset();
-
-      if (!buffer.hasAttribute(CharTermAttribute.class)) {
-        return new NoTokenFoundQueryNode();
-      }
-
-      CharTermAttribute termAtt = buffer.getAttribute(CharTermAttribute.class);
-
-      if (numTokens == 0) {
-        return new NoTokenFoundQueryNode();
-
-      } else if (numTokens == 1) {
-        String term = null;
-        try {
-          boolean hasNext;
-          hasNext = buffer.incrementToken();
-          assert hasNext == true;
-          term = termAtt.toString();
-
-        } catch (IOException e) {
-          // safe to ignore, because we know the number of tokens
+        
+        // rewind the buffer stream
+        buffer.reset();
+  
+        if (!buffer.hasAttribute(CharTermAttribute.class)) {
+          return new NoTokenFoundQueryNode();
         }
-
-        fieldNode.setText(term);
-
-        return fieldNode;
-
-      } else if (severalTokensAtSamePosition || !(node instanceof QuotedFieldQueryNode)) {
-        if (positionCount == 1 || !(node instanceof QuotedFieldQueryNode)) {
-          // no phrase query:
-          LinkedList<QueryNode> children = new LinkedList<QueryNode>();
-
-          for (int i = 0; i < numTokens; i++) {
-            String term = null;
-            try {
-              boolean hasNext = buffer.incrementToken();
-              assert hasNext == true;
-              term = termAtt.toString();
-
-            } catch (IOException e) {
-              // safe to ignore, because we know the number of tokens
+  
+        CharTermAttribute termAtt = buffer.getAttribute(CharTermAttribute.class);
+  
+        if (numTokens == 0) {
+          return new NoTokenFoundQueryNode();
+  
+        } else if (numTokens == 1) {
+          String term = null;
+          try {
+            boolean hasNext;
+            hasNext = buffer.incrementToken();
+            assert hasNext == true;
+            term = termAtt.toString();
+  
+          } catch (IOException e) {
+            // safe to ignore, because we know the number of tokens
+          }
+  
+          fieldNode.setText(term);
+  
+          return fieldNode;
+  
+        } else if (severalTokensAtSamePosition || !(node instanceof QuotedFieldQueryNode)) {
+          if (positionCount == 1 || !(node instanceof QuotedFieldQueryNode)) {
+            // no phrase query:
+            
+            if (positionCount == 1) {
+              // simple case: only one position, with synonyms
+              LinkedList<QueryNode> children = new LinkedList<QueryNode>();
+              
+              for (int i = 0; i < numTokens; i++) {
+                String term = null;
+                try {
+                  boolean hasNext = buffer.incrementToken();
+                  assert hasNext == true;
+                  term = termAtt.toString();
+                  
+                } catch (IOException e) {
+                  // safe to ignore, because we know the number of tokens
+                }
+                
+                children.add(new FieldQueryNode(field, term, -1, -1));
+                
+              }
+              return new GroupQueryNode(
+                  new StandardBooleanQueryNode(children, positionCount==1));
+            } else {
+              // multiple positions
+              QueryNode q = new StandardBooleanQueryNode(Collections.<QueryNode>emptyList(),false);
+              QueryNode currentQuery = null;
+              for (int i = 0; i < numTokens; i++) {
+                String term = null;
+                try {
+                  boolean hasNext = buffer.incrementToken();
+                  assert hasNext == true;
+                  term = termAtt.toString();
+                } catch (IOException e) {
+                  // safe to ignore, because we know the number of tokens
+                }
+                if (posIncrAtt != null && posIncrAtt.getPositionIncrement() == 0) {
+                  if (!(currentQuery instanceof BooleanQueryNode)) {
+                    QueryNode t = currentQuery;
+                    currentQuery = new StandardBooleanQueryNode(Collections.<QueryNode>emptyList(), true);
+                    ((BooleanQueryNode)currentQuery).add(t);
+                  }
+                  ((BooleanQueryNode)currentQuery).add(new FieldQueryNode(field, term, -1, -1));
+                } else {
+                  if (currentQuery != null) {
+                    if (this.defaultOperator == Operator.OR) {
+                      q.add(currentQuery);
+                    } else {
+                      q.add(new ModifierQueryNode(currentQuery, Modifier.MOD_REQ));
+                    }
+                  }
+                  currentQuery = new FieldQueryNode(field, term, -1, -1);
+                }
+              }
+              if (this.defaultOperator == Operator.OR) {
+                q.add(currentQuery);
+              } else {
+                q.add(new ModifierQueryNode(currentQuery, Modifier.MOD_REQ));
+              }
+              
+              if (q instanceof BooleanQueryNode) {
+                q = new GroupQueryNode(q);
+              }
+              return q;
             }
-
-            children.add(new FieldQueryNode(field, term, -1, -1));
-
+          } else {
+            // phrase query:
+            MultiPhraseQueryNode mpq = new MultiPhraseQueryNode();
+  
+            List<FieldQueryNode> multiTerms = new ArrayList<FieldQueryNode>();
+            int position = -1;
+            int i = 0;
+            int termGroupCount = 0;
+            for (; i < numTokens; i++) {
+              String term = null;
+              int positionIncrement = 1;
+              try {
+                boolean hasNext = buffer.incrementToken();
+                assert hasNext == true;
+                term = termAtt.toString();
+                if (posIncrAtt != null) {
+                  positionIncrement = posIncrAtt.getPositionIncrement();
+                }
+  
+              } catch (IOException e) {
+                // safe to ignore, because we know the number of tokens
+              }
+  
+              if (positionIncrement > 0 && multiTerms.size() > 0) {
+  
+                for (FieldQueryNode termNode : multiTerms) {
+  
+                  if (this.positionIncrementsEnabled) {
+                    termNode.setPositionIncrement(position);
+                  } else {
+                    termNode.setPositionIncrement(termGroupCount);
+                  }
+  
+                  mpq.add(termNode);
+  
+                }
+  
+                // Only increment once for each "group" of
+                // terms that were in the same position:
+                termGroupCount++;
+  
+                multiTerms.clear();
+  
+              }
+  
+              position += positionIncrement;
+              multiTerms.add(new FieldQueryNode(field, term, -1, -1));
+  
+            }
+  
+            for (FieldQueryNode termNode : multiTerms) {
+  
+              if (this.positionIncrementsEnabled) {
+                termNode.setPositionIncrement(position);
+  
+              } else {
+                termNode.setPositionIncrement(termGroupCount);
+              }
+  
+              mpq.add(termNode);
+  
+            }
+  
+            return mpq;
+  
           }
-          return new GroupQueryNode(
-            new StandardBooleanQueryNode(children, positionCount==1));
+  
         } else {
-          // phrase query:
-          MultiPhraseQueryNode mpq = new MultiPhraseQueryNode();
-
-          List<FieldQueryNode> multiTerms = new ArrayList<FieldQueryNode>();
+  
+          TokenizedPhraseQueryNode pq = new TokenizedPhraseQueryNode();
+  
           int position = -1;
-          int i = 0;
-          int termGroupCount = 0;
-          for (; i < numTokens; i++) {
+  
+          for (int i = 0; i < numTokens; i++) {
             String term = null;
             int positionIncrement = 1;
+  
             try {
               boolean hasNext = buffer.incrementToken();
               assert hasNext == true;
               term = termAtt.toString();
+  
               if (posIncrAtt != null) {
                 positionIncrement = posIncrAtt.getPositionIncrement();
               }
-
+  
             } catch (IOException e) {
               // safe to ignore, because we know the number of tokens
             }
-
-            if (positionIncrement > 0 && multiTerms.size() > 0) {
-
-              for (FieldQueryNode termNode : multiTerms) {
-
-                if (this.positionIncrementsEnabled) {
-                  termNode.setPositionIncrement(position);
-                } else {
-                  termNode.setPositionIncrement(termGroupCount);
-                }
-
-                mpq.add(termNode);
-
-              }
-
-              // Only increment once for each "group" of
-              // terms that were in the same position:
-              termGroupCount++;
-
-              multiTerms.clear();
-
-            }
-
-            position += positionIncrement;
-            multiTerms.add(new FieldQueryNode(field, term, -1, -1));
-
-          }
-
-          for (FieldQueryNode termNode : multiTerms) {
-
+  
+            FieldQueryNode newFieldNode = new FieldQueryNode(field, term, -1, -1);
+  
             if (this.positionIncrementsEnabled) {
-              termNode.setPositionIncrement(position);
-
+              position += positionIncrement;
+              newFieldNode.setPositionIncrement(position);
+  
             } else {
-              termNode.setPositionIncrement(termGroupCount);
+              newFieldNode.setPositionIncrement(i);
             }
-
-            mpq.add(termNode);
-
+  
+            pq.add(newFieldNode);
+  
           }
-
-          return mpq;
-
+  
+          return pq;
+  
         }
-
-      } else {
-
-        TokenizedPhraseQueryNode pq = new TokenizedPhraseQueryNode();
-
-        int position = -1;
-
-        for (int i = 0; i < numTokens; i++) {
-          String term = null;
-          int positionIncrement = 1;
-
+      } finally {
+        if (buffer != null) {
           try {
-            boolean hasNext = buffer.incrementToken();
-            assert hasNext == true;
-            term = termAtt.toString();
-
-            if (posIncrAtt != null) {
-              positionIncrement = posIncrAtt.getPositionIncrement();
-            }
-
+            buffer.close();
           } catch (IOException e) {
-            // safe to ignore, because we know the number of tokens
-          }
-
-          FieldQueryNode newFieldNode = new FieldQueryNode(field, term, -1, -1);
-
-          if (this.positionIncrementsEnabled) {
-            position += positionIncrement;
-            newFieldNode.setPositionIncrement(position);
-
-          } else {
-            newFieldNode.setPositionIncrement(i);
+            // safe to ignore
           }
-
-          pq.add(newFieldNode);
-
         }
-
-        return pq;
-
       }
-
     }
 
     return node;
-
   }
 
   @Override
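
For context, a sketch of the behavior this rework is after: when the analyzer expands a single query token into terms at several positions, the processor now groups the terms per position and joins the positions with the configured default operator (ConfigurationKeys.DEFAULT_OPERATOR) instead of always building a flat group. The analyzer and query text below are assumptions used only for illustration.

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
import org.apache.lucene.queryparser.flexible.standard.StandardQueryParser;
import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfigHandler.Operator;
import org.apache.lucene.search.Query;

class DefaultOperatorSketch {
  /** splittingAnalyzer is assumed to split "wi-fi" into "wi" and "fi" at consecutive positions. */
  static Query parseWithAnd(Analyzer splittingAnalyzer) throws QueryNodeException {
    StandardQueryParser parser = new StandardQueryParser(splittingAnalyzer);
    parser.setDefaultOperator(Operator.AND);
    // with AND as the default operator, the per-position terms are wrapped in
    // MOD_REQ modifiers, i.e. the positions become required clauses;
    // with OR they remain optional clauses
    return parser.parse("wi-fi", "body");
  }
}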

Modified: lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/CharStream.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/CharStream.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/CharStream.java (original)
+++ lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/CharStream.java Fri Jan 17 17:23:33 2014
@@ -112,4 +112,4 @@ interface CharStream {
   void Done();
 
 }
-/* JavaCC - OriginalChecksum=242ae59b965491e225a44534cbc73b42 (do not edit this line) */
+/* JavaCC - OriginalChecksum=5ca20c9145f29a0f8909470a7f949fe4 (do not edit this line) */

Modified: lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/ParseException.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/ParseException.java?rev=1559196&r1=1559195&r2=1559196&view=diff
==============================================================================
--- lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/ParseException.java (original)
+++ lucene/dev/branches/lucene5376/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/ParseException.java Fri Jan 17 17:23:33 2014
@@ -89,7 +89,7 @@ public class ParseException extends Exce
                            int[][] expectedTokenSequences,
                            String[] tokenImage) {
     String eol = System.getProperty("line.separator", "\n");
-    StringBuffer expected = new StringBuffer();
+    StringBuilder expected = new StringBuilder();
     int maxSize = 0;
     for (int i = 0; i < expectedTokenSequences.length; i++) {
       if (maxSize < expectedTokenSequences[i].length) {
@@ -139,7 +139,7 @@ public class ParseException extends Exce
    * string literal.
    */
   static String add_escapes(String str) {
-      StringBuffer retval = new StringBuffer();
+      StringBuilder retval = new StringBuilder();
       char ch;
       for (int i = 0; i < str.length(); i++) {
         switch (str.charAt(i))
@@ -184,4 +184,4 @@ public class ParseException extends Exce
    }
 
 }
-/* JavaCC - OriginalChecksum=bd8163f41bf2fd1bb00f025fce3dcaaf (do not edit this line) */
+/* JavaCC - OriginalChecksum=be6f55e3bf157e8c96b4c06cca5ec81b (do not edit this line) */