Posted to commits@lucene.apache.org by mi...@apache.org on 2015/01/04 15:53:21 UTC

svn commit: r1649347 [8/31] - in /lucene/dev/branches/lucene6005: ./ dev-tools/ dev-tools/idea/solr/contrib/dataimporthandler-extras/ dev-tools/idea/solr/contrib/extraction/ dev-tools/idea/solr/contrib/map-reduce/ dev-tools/idea/solr/contrib/velocity/ ...

Modified: lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java (original)
+++ lucene/dev/branches/lucene6005/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java Sun Jan  4 14:53:12 2015
@@ -1410,10 +1410,10 @@ public class TestFSTs extends LuceneTest
       Util.TopResults<Long> r = Util.shortestPaths(fst, arc, fst.outputs.getNoOutput(), minLongComparator, topN, true);
       assertTrue(r.isComplete);
 
-      // 2. go thru whole treemap (slowCompletor) and check its actually the best suggestion
+      // 2. go thru whole treemap (slowCompletor) and check it's actually the best suggestion
       final List<Result<Long>> matches = new ArrayList<>();
 
-      // TODO: could be faster... but its slowCompletor for a reason
+      // TODO: could be faster... but it's slowCompletor for a reason
       for (Map.Entry<String,Long> e : slowCompletor.entrySet()) {
         if (e.getKey().startsWith(prefix)) {
           //System.out.println("  consider " + e.getKey());
@@ -1531,10 +1531,10 @@ public class TestFSTs extends LuceneTest
 
       Util.TopResults<Pair<Long,Long>> r = Util.shortestPaths(fst, arc, fst.outputs.getNoOutput(), minPairWeightComparator, topN, true);
       assertTrue(r.isComplete);
-      // 2. go thru whole treemap (slowCompletor) and check its actually the best suggestion
+      // 2. go thru whole treemap (slowCompletor) and check it's actually the best suggestion
       final List<Result<Pair<Long,Long>>> matches = new ArrayList<>();
 
-      // TODO: could be faster... but its slowCompletor for a reason
+      // TODO: could be faster... but it's slowCompletor for a reason
       for (Map.Entry<String,TwoLongs> e : slowCompletor.entrySet()) {
         if (e.getKey().startsWith(prefix)) {
           //System.out.println("  consider " + e.getKey());
@@ -1586,4 +1586,51 @@ public class TestFSTs extends LuceneTest
     }
   }
 
+  public void testIllegallyModifyRootArc() throws Exception {
+    assumeTrue("test relies on assertions", assertsAreEnabled);
+
+    Set<BytesRef> terms = new HashSet<>();
+    for(int i=0;i<100;i++) {
+      String prefix = Character.toString((char) ('a' + i));
+      terms.add(new BytesRef(prefix));
+      if (prefix.equals("m") == false) {
+        for(int j=0;j<20;j++) {
+          // Make a big enough FST that the root cache will be created:
+          String suffix = TestUtil.randomRealisticUnicodeString(random(), 10, 20);
+          terms.add(new BytesRef(prefix + suffix));
+        }
+      }
+    }
+
+    List<BytesRef> termsList = new ArrayList<>(terms);
+    Collections.sort(termsList);
+
+    ByteSequenceOutputs outputs = ByteSequenceOutputs.getSingleton();
+    Builder<BytesRef> builder = new Builder<>(FST.INPUT_TYPE.BYTE1, outputs);
+
+    IntsRefBuilder input = new IntsRefBuilder();
+    for(BytesRef term : termsList) {
+      Util.toIntsRef(term, input);
+      builder.add(input.get(), term);
+    }
+
+    FST<BytesRef> fst = builder.finish();
+    
+    Arc<BytesRef> arc = new FST.Arc<>();
+    fst.getFirstArc(arc);
+    FST.BytesReader reader = fst.getBytesReader();
+    arc = fst.findTargetArc((int) 'm', arc, arc, reader);
+    assertNotNull(arc);
+    assertEquals(new BytesRef("m"), arc.output);
+
+    // NOTE: illegal:
+    arc.output.length = 0;
+
+    fst.getFirstArc(arc);
+    try {
+      arc = fst.findTargetArc((int) 'm', arc, arc, reader);
+    } catch (AssertionError ae) {
+      // expected
+    }
+  }
 }

Modified: lucene/dev/branches/lucene6005/lucene/demo/build.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/demo/build.xml?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/demo/build.xml (original)
+++ lucene/dev/branches/lucene6005/lucene/demo/build.xml Sun Jan  4 14:53:12 2015
@@ -42,7 +42,7 @@
 
   <target name="javadocs" depends="javadocs-analyzers-common,javadocs-queryparser,javadocs-facet,javadocs-expressions,compile-core,check-javadocs-uptodate"
           unless="javadocs-uptodate-${name}">
-    <!-- we link the example source in the javadocs, as its ref'ed elsewhere -->
+    <!-- we link the example source in the javadocs, as it's ref'ed elsewhere -->
     <invoke-module-javadoc linksource="yes">
       <links>
         <link href="../analyzers-common"/>

Modified: lucene/dev/branches/lucene6005/lucene/expressions/src/java/org/apache/lucene/expressions/js/JavascriptCompiler.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/expressions/src/java/org/apache/lucene/expressions/js/JavascriptCompiler.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/expressions/src/java/org/apache/lucene/expressions/js/JavascriptCompiler.java (original)
+++ lucene/dev/branches/lucene6005/lucene/expressions/src/java/org/apache/lucene/expressions/js/JavascriptCompiler.java Sun Jan  4 14:53:12 2015
@@ -104,7 +104,7 @@ public class JavascriptCompiler {
     return org.objectweb.asm.commons.Method.getMethod(method);
   }
   
-  // This maximum length is theoretically 65535 bytes, but as its CESU-8 encoded we dont know how large it is in bytes, so be safe
+  // This maximum length is theoretically 65535 bytes, but as it's CESU-8 encoded we don't know how large it is in bytes, so be safe
   // rcmuir: "If your ranking function is that large you need to check yourself into a mental institution!"
   private static final int MAX_SOURCE_LENGTH = 16384;
   

Modified: lucene/dev/branches/lucene6005/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/CachedOrdinalsReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/CachedOrdinalsReader.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/CachedOrdinalsReader.java (original)
+++ lucene/dev/branches/lucene6005/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/CachedOrdinalsReader.java Sun Jan  4 14:53:12 2015
@@ -18,13 +18,13 @@ package org.apache.lucene.facet.taxonomy
  */
 
 import java.io.IOException;
-import java.util.Collections;
+import java.util.Collection;
 import java.util.Map;
 import java.util.WeakHashMap;
 
 import org.apache.lucene.codecs.DocValuesFormat;
-import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.BinaryDocValues;
+import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.Accountables;
 import org.apache.lucene.util.ArrayUtil;
@@ -161,7 +161,7 @@ public class CachedOrdinalsReader extend
   }
   
   @Override
-  public synchronized Iterable<? extends Accountable> getChildResources() {
+  public synchronized Collection<Accountable> getChildResources() {
     return Accountables.namedAccountables("segment", ordsCache);
   }
 }
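
The signature change above follows the updated Accountable API, where getChildResources() returns Collection<Accountable> rather than Iterable<? extends Accountable>. A minimal sketch of an implementation of that contract, assuming a per-segment cache; the class and field names are illustrative, not part of this commit:

    import java.util.Collection;
    import java.util.Map;
    import java.util.WeakHashMap;
    import org.apache.lucene.util.Accountable;
    import org.apache.lucene.util.Accountables;

    class SegmentOrdsCache implements Accountable {
      private final Map<Object,Accountable> ordsCache = new WeakHashMap<>();

      @Override
      public synchronized long ramBytesUsed() {
        long bytes = 0;
        for (Accountable ords : ordsCache.values()) {
          bytes += ords.ramBytesUsed();
        }
        return bytes;
      }

      @Override
      public synchronized Collection<Accountable> getChildResources() {
        // one named child resource per cached segment
        return Accountables.namedAccountables("segment", ordsCache);
      }
    }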

Modified: lucene/dev/branches/lucene6005/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/TaxonomyIndexArrays.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/TaxonomyIndexArrays.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/TaxonomyIndexArrays.java (original)
+++ lucene/dev/branches/lucene6005/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/TaxonomyIndexArrays.java Sun Jan  4 14:53:12 2015
@@ -38,7 +38,7 @@ class TaxonomyIndexArrays extends Parall
 
   private final int[] parents;
 
-  // the following two arrays are lazily intialized. note that we only keep a
+  // the following two arrays are lazily initialized. note that we only keep a
   // single boolean member as volatile, instead of declaring the arrays
   // volatile. the code guarantees that only after the boolean is set to true,
   // the arrays are returned.

Modified: lucene/dev/branches/lucene6005/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyCombined.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyCombined.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyCombined.java (original)
+++ lucene/dev/branches/lucene6005/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyCombined.java Sun Jan  4 14:53:12 2015
@@ -224,7 +224,7 @@ public class TestTaxonomyCombined extend
     // Now, open the same taxonomy and add the same categories again.
     // After a few categories, the LuceneTaxonomyWriter implementation
     // will stop looking for each category on disk, and rather read them
-    // all into memory and close it's reader. The bug was that it closed
+    // all into memory and close its reader. The bug was that it closed
     // the reader, but forgot that it did (because it didn't set the reader
     // reference to null).
     tw = new DirectoryTaxonomyWriter(indexDir);
@@ -743,7 +743,7 @@ public class TestTaxonomyCombined extend
   private void assertConsistentYoungestChild(final FacetLabel abPath,
       final int abOrd, final int abYoungChildBase1, final int abYoungChildBase2, final int retry, int numCategories)
       throws Exception {
-    SlowRAMDirectory indexDir = new SlowRAMDirectory(-1, null); // no slowness for intialization
+    SlowRAMDirectory indexDir = new SlowRAMDirectory(-1, null); // no slowness for initialization
     TaxonomyWriter tw = new DirectoryTaxonomyWriter(indexDir);
     tw.addCategory(new FacetLabel("a", "0"));
     tw.addCategory(abPath);

Modified: lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java (original)
+++ lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java Sun Jan  4 14:53:12 2015
@@ -187,7 +187,6 @@ public class Highlighter
 
     CharTermAttribute termAtt = tokenStream.addAttribute(CharTermAttribute.class);
     OffsetAttribute offsetAtt = tokenStream.addAttribute(OffsetAttribute.class);
-    tokenStream.reset();
     TextFragment currentFrag =  new TextFragment(newText,newText.length(), docFrags.size());
 
     if (fragmentScorer instanceof QueryScorer) {
@@ -214,6 +213,7 @@ public class Highlighter
 
       TokenGroup tokenGroup=new TokenGroup(tokenStream);
 
+      tokenStream.reset();
       for (boolean next = tokenStream.incrementToken(); next && (offsetAtt.startOffset()< maxDocCharsToAnalyze);
             next = tokenStream.incrementToken())
       {
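
The two hunks above move reset() from stream setup down to just before the consumption loop, matching the TokenStream workflow contract: reset(), then incrementToken() until exhausted, then end() and close(). A hedged sketch of that contract in isolation; the analyzer, field name, and sample text are assumptions for illustration:

    try (TokenStream ts = analyzer.tokenStream("body", "some text to analyze")) {
      CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
      OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
      ts.reset();                 // required before the first incrementToken()
      while (ts.incrementToken()) {
        // consume termAtt / offsetAtt for the current token
      }
      ts.end();                   // records final offset state
    }                             // close() via try-with-resources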

Modified: lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermVector.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermVector.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermVector.java (original)
+++ lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermVector.java Sun Jan  4 14:53:12 2015
@@ -21,18 +21,24 @@ import java.io.IOException;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PackedTokenAttributeImpl;
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.util.AttributeFactory;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefArray;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.Counter;
 import org.apache.lucene.util.UnicodeUtil;
 
 /**
  * TokenStream created from a term vector field. The term vector requires positions and/or offsets (either). If you
  * want payloads add PayloadAttributeImpl (as you would normally) but don't assume the attribute is already added just
- * because you know the term vector has payloads.  This TokenStream supports an efficient {@link #reset()}, so there's
+ * because you know the term vector has payloads: the first call to incrementToken() checks whether you asked
+ * for them and, if not, payloads won't be fetched.  This TokenStream supports an efficient {@link #reset()}, so there's
  * no need to wrap with a caching impl.
  * <p />
  * The implementation will create an array of tokens indexed by token position.  As long as there aren't massive jumps
@@ -47,6 +53,11 @@ public final class TokenStreamFromTermVe
 
   //TODO add a maxStartOffset filter, which highlighters will find handy
 
+  //This attribute factory uses less memory when captureState() is called.
+  public static final AttributeFactory ATTRIBUTE_FACTORY =
+      AttributeFactory.getStaticImplementation(
+          AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, PackedTokenAttributeImpl.class);
+
   private final Terms vector;
 
   private final CharTermAttribute termAttribute;
@@ -56,11 +67,15 @@ public final class TokenStreamFromTermVe
   private OffsetAttribute offsetAttribute;//maybe null
 
   private PayloadAttribute payloadAttribute;//maybe null
+  private BytesRefArray payloadsBytesRefArray;//only used when payloadAttribute is non-null
+  private BytesRefBuilder spareBytesRefBuilder;//only used when payloadAttribute is non-null
 
   private TokenLL firstToken = null; // the head of a linked-list
 
   private TokenLL incrementToken = null;
 
+  private boolean initialized = false;//lazy
+
   /**
    * Constructor.
    * 
@@ -68,6 +83,8 @@ public final class TokenStreamFromTermVe
    *        creating the TokenStream. Must have positions and/or offsets.
    */
   public TokenStreamFromTermVector(Terms vector) throws IOException {
+    super(ATTRIBUTE_FACTORY);
+    assert !hasAttribute(PayloadAttribute.class) : "AttributeFactory shouldn't have payloads *yet*";
     if (!vector.hasPositions() && !vector.hasOffsets()) {
       throw new IllegalArgumentException("The term vector needs positions and/or offsets.");
     }
@@ -81,20 +98,20 @@ public final class TokenStreamFromTermVe
 
   @Override
   public void reset() throws IOException {
-    if (firstToken == null) {//just the first time
-      init();
-    }
     incrementToken = null;
     super.reset();
   }
 
-  //We initialize in reset() because we can see which attributes the consumer wants, particularly payloads
+  //We delay initialization so that we can see which attributes the consumer wants, particularly payloads
   private void init() throws IOException {
+    assert !initialized;
     if (vector.hasOffsets()) {
       offsetAttribute = addAttribute(OffsetAttribute.class);
     }
     if (vector.hasPayloads() && hasAttribute(PayloadAttribute.class)) {
       payloadAttribute = getAttribute(PayloadAttribute.class);
+      payloadsBytesRefArray = new BytesRefArray(Counter.newCounter());
+      spareBytesRefBuilder = new BytesRefBuilder();
     }
 
     // Step 1: iterate termsEnum and create a token, placing into an array of tokens by position
@@ -132,13 +149,8 @@ public final class TokenStreamFromTermVe
         }
 
         if (payloadAttribute != null) {
-          // Must make a deep copy of the returned payload,
-          // since D&PEnum API is allowed to re-use on every
-          // call:
           final BytesRef payload = dpEnum.getPayload();
-          if (payload != null) {
-            token.payload = BytesRef.deepCopyOf(payload);//TODO share a ByteBlockPool & re-use BytesRef
-          }
+          token.payloadIndex = payload == null ? -1 : payloadsBytesRefArray.append(payload);
         }
 
         //Add token to an array indexed by position
@@ -198,6 +210,8 @@ public final class TokenStreamFromTermVe
       prevTokenPos = pos;
       prevToken = token;
     }
+
+    initialized = true;
   }
 
   private TokenLL[] initTokensArray() throws IOException {
@@ -216,8 +230,12 @@ public final class TokenStreamFromTermVe
   }
 
   @Override
-  public boolean incrementToken() {
+  public boolean incrementToken() throws IOException {
     if (incrementToken == null) {
+      if (!initialized) {
+        init();
+        assert initialized;
+      }
       incrementToken = firstToken;
       if (incrementToken == null) {
         return false;
@@ -234,7 +252,11 @@ public final class TokenStreamFromTermVe
       offsetAttribute.setOffset(incrementToken.startOffset, incrementToken.endOffset);
     }
     if (payloadAttribute != null) {
-      payloadAttribute.setPayload(incrementToken.payload);
+      if (incrementToken.payloadIndex == -1) {
+        payloadAttribute.setPayload(null);
+      } else {
+        payloadAttribute.setPayload(payloadsBytesRefArray.get(spareBytesRefBuilder, incrementToken.payloadIndex));
+      }
     }
     return true;
   }
@@ -245,7 +267,7 @@ public final class TokenStreamFromTermVe
     int positionIncrement;
     int startOffset;
     int endOffset;
-    BytesRef payload;
+    int payloadIndex;
 
     TokenLL next;
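
Because initialization is now deferred to the first incrementToken(), a consumer that wants payloads must add the PayloadAttribute before consuming the stream, as the updated class javadoc notes. A hedged usage sketch; the reader, docID, and field name are assumptions, not part of this commit:

    Terms vector = reader.getTermVector(docID, "body");      // needs positions and/or offsets
    TokenStream ts = new TokenStreamFromTermVector(vector);
    PayloadAttribute payAtt = ts.addAttribute(PayloadAttribute.class); // opt in before first incrementToken()
    CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
    ts.reset();
    while (ts.incrementToken()) {
      // termAtt holds the term text; payAtt.getPayload() may be null for tokens without payloads
    }
    ts.end();
    ts.close();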
 

Modified: lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java (original)
+++ lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java Sun Jan  4 14:53:12 2015
@@ -394,7 +394,6 @@ public class WeightedSpanTermExtractor {
           indexer.addField(DelegatingLeafReader.FIELD_NAME,
               new OffsetLimitTokenFilter(tokenStream, maxDocCharsToAnalyze));
         }
-        tokenStream.reset();//reset to beginning when we return
         final IndexSearcher searcher = indexer.createSearcher();
         // MEM index has only atomic ctx
         internalReader = ((LeafReaderContext) searcher.getTopReaderContext()).reader();

Modified: lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/DefaultPassageFormatter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/DefaultPassageFormatter.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/DefaultPassageFormatter.java (original)
+++ lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/DefaultPassageFormatter.java Sun Jan  4 14:53:12 2015
@@ -62,7 +62,7 @@ public class DefaultPassageFormatter ext
     StringBuilder sb = new StringBuilder();
     int pos = 0;
     for (Passage passage : passages) {
-      // don't add ellipsis if its the first one, or if its connected.
+      // don't add ellipsis if it's the first one, or if it's connected.
       if (passage.startOffset > pos && pos > 0) {
         sb.append(ellipsis);
       }
@@ -70,7 +70,7 @@ public class DefaultPassageFormatter ext
       for (int i = 0; i < passage.numMatches; i++) {
         int start = passage.matchStarts[i];
         int end = passage.matchEnds[i];
-        // its possible to have overlapping terms
+        // it's possible to have overlapping terms
         if (start > pos) {
           append(sb, content, pos, start);
         }
@@ -81,7 +81,7 @@ public class DefaultPassageFormatter ext
           pos = end;
         }
       }
-      // its possible a "term" from the analyzer could span a sentence boundary.
+      // it's possible a "term" from the analyzer could span a sentence boundary.
       append(sb, content, pos, Math.max(pos, passage.endOffset));
       pos = passage.endOffset;
     }

Modified: lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/MultiTermHighlighting.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/MultiTermHighlighting.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/MultiTermHighlighting.java (original)
+++ lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/MultiTermHighlighting.java Sun Jan  4 14:53:12 2015
@@ -157,7 +157,7 @@ class MultiTermHighlighting {
         final CharsRef scratch = new CharsRef();
         final Comparator<CharsRef> comparator = CharsRef.getUTF16SortedAsUTF8Comparator();
         
-        // this is *not* an automaton, but its very simple
+        // this is *not* an automaton, but it's very simple
         list.add(new CharacterRunAutomaton(Automata.makeEmpty()) {
           @Override
           public boolean run(char[] s, int offset, int length) {

Modified: lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/Passage.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/Passage.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/Passage.java (original)
+++ lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/Passage.java Sun Jan  4 14:53:12 2015
@@ -141,7 +141,7 @@ public final class Passage {
   /**
    * End offsets of the term matches, corresponding with {@link #getMatchStarts}. 
    * <p>
-   * Only {@link #getNumMatches} are valid. Note that its possible that an end offset 
+   * Only {@link #getNumMatches} are valid. Note that it's possible that an end offset 
    * could exceed beyond the bounds of the passage ({@link #getEndOffset()}), if the 
    * Analyzer produced a term which spans a passage boundary.
    */

Modified: lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/SimpleBoundaryScanner.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/SimpleBoundaryScanner.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/SimpleBoundaryScanner.java (original)
+++ lucene/dev/branches/lucene6005/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/SimpleBoundaryScanner.java Sun Jan  4 14:53:12 2015
@@ -66,7 +66,7 @@ public class SimpleBoundaryScanner imple
       if( boundaryChars.contains( buffer.charAt( offset - 1 ) ) ) return offset;
       offset--;
     }
-    // if we scanned up to the start of the text, return it, its a "boundary"
+    // if we scanned up to the start of the text, return it, it's a "boundary"
     if (offset == 0) {
       return 0;
     }

Modified: lucene/dev/branches/lucene6005/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestMultiTermHighlighting.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestMultiTermHighlighting.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestMultiTermHighlighting.java (original)
+++ lucene/dev/branches/lucene6005/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestMultiTermHighlighting.java Sun Jan  4 14:53:12 2015
@@ -25,7 +25,6 @@ import org.apache.lucene.index.IndexRead
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.queries.TermFilter;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.ConstantScoreQuery;
@@ -38,6 +37,7 @@ import org.apache.lucene.search.PrefixQu
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.RegexpQuery;
 import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.TermFilter;
 import org.apache.lucene.search.TermRangeQuery;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.search.WildcardQuery;
@@ -786,7 +786,7 @@ public class TestMultiTermHighlighting e
             StringBuilder sb = new StringBuilder();
             int pos = 0;
             for (Passage passage : passages) {
-              // don't add ellipsis if its the first one, or if its connected.
+              // don't add ellipsis if it's the first one, or if it's connected.
               if (passage.startOffset > pos && pos > 0) {
                 sb.append("... ");
               }
@@ -794,7 +794,7 @@ public class TestMultiTermHighlighting e
               for (int i = 0; i < passage.numMatches; i++) {
                 int start = passage.matchStarts[i];
                 int end = passage.matchEnds[i];
-                // its possible to have overlapping terms
+                // it's possible to have overlapping terms
                 if (start > pos) {
                   sb.append(content, pos, start);
                 }
@@ -808,7 +808,7 @@ public class TestMultiTermHighlighting e
                   pos = end;
                 }
               }
-              // its possible a "term" from the analyzer could span a sentence boundary.
+              // it's possible a "term" from the analyzer could span a sentence boundary.
               sb.append(content, pos, Math.max(pos, passage.endOffset));
               pos = passage.endOffset;
             }

Modified: lucene/dev/branches/lucene6005/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestPostingsHighlighter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestPostingsHighlighter.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestPostingsHighlighter.java (original)
+++ lucene/dev/branches/lucene6005/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestPostingsHighlighter.java Sun Jan  4 14:53:12 2015
@@ -1057,7 +1057,7 @@ public class TestPostingsHighlighter ext
       highlighter.highlight("nofield", query, searcher, topDocs);
       fail("did not hit exception");
     } catch (Exception e) {
-      assertEquals(e.getMessage(), "field \"nofield\" is not recognized");
+      assertEquals(e.getMessage(), "field \"nofield\" is not recognized; known fields: [$fieldnames, body, not, title, yes]");
     }
 
     // we didn't enable highlighting for this atom field

Modified: lucene/dev/branches/lucene6005/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/BreakIteratorBoundaryScannerTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/BreakIteratorBoundaryScannerTest.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/BreakIteratorBoundaryScannerTest.java (original)
+++ lucene/dev/branches/lucene6005/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/BreakIteratorBoundaryScannerTest.java Sun Jan  4 14:53:12 2015
@@ -57,7 +57,7 @@ public class BreakIteratorBoundaryScanne
 
   public void testSentenceBoundary() throws Exception {
     StringBuilder text = new StringBuilder(TEXT);
-    // we test this with default locale, its randomized by LuceneTestCase
+    // we test this with default locale, it's randomized by LuceneTestCase
     BreakIterator bi = BreakIterator.getSentenceInstance(Locale.getDefault());
     BoundaryScanner scanner = new BreakIteratorBoundaryScanner(bi);
     
@@ -71,7 +71,7 @@ public class BreakIteratorBoundaryScanne
 
   public void testLineBoundary() throws Exception {
     StringBuilder text = new StringBuilder(TEXT);
-    // we test this with default locale, its randomized by LuceneTestCase
+    // we test this with default locale, it's randomized by LuceneTestCase
     BreakIterator bi = BreakIterator.getLineInstance(Locale.getDefault());
     BoundaryScanner scanner = new BreakIteratorBoundaryScanner(bi);
     

Modified: lucene/dev/branches/lucene6005/lucene/join/src/java/org/apache/lucene/search/join/BitDocIdSetCachingWrapperFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/join/src/java/org/apache/lucene/search/join/BitDocIdSetCachingWrapperFilter.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/join/src/java/org/apache/lucene/search/join/BitDocIdSetCachingWrapperFilter.java (original)
+++ lucene/dev/branches/lucene6005/lucene/join/src/java/org/apache/lucene/search/join/BitDocIdSetCachingWrapperFilter.java Sun Jan  4 14:53:12 2015
@@ -18,6 +18,7 @@ package org.apache.lucene.search.join;
  */
 
 import java.io.IOException;
+import java.util.Collection;
 
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
@@ -90,7 +91,7 @@ public class BitDocIdSetCachingWrapperFi
   }
 
   @Override
-  public Iterable<? extends Accountable> getChildResources() {
+  public Collection<Accountable> getChildResources() {
     return filter.getChildResources();
   }
 

Modified: lucene/dev/branches/lucene6005/lucene/join/src/java/org/apache/lucene/search/join/ToChildBlockJoinQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/join/src/java/org/apache/lucene/search/join/ToChildBlockJoinQuery.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/join/src/java/org/apache/lucene/search/join/ToChildBlockJoinQuery.java (original)
+++ lucene/dev/branches/lucene6005/lucene/join/src/java/org/apache/lucene/search/join/ToChildBlockJoinQuery.java Sun Jan  4 14:53:12 2015
@@ -50,6 +50,7 @@ public class ToChildBlockJoinQuery exten
    *  ToChildBlockJoinScorer#validateParentDoc} on mis-use,
    *  when the parent query incorrectly returns child docs. */
   static final String INVALID_QUERY_MESSAGE = "Parent query yields document which is not matched by parents filter, docID=";
+  static final String ILLEGAL_ADVANCE_ON_PARENT = "Expected to be advanced on child docs only; got docID=";
 
   private final BitDocIdSetFilter parentsFilter;
   private final Query parentQuery;
@@ -279,7 +280,6 @@ public class ToChildBlockJoinQuery exten
 
     @Override
     public int advance(int childTarget) throws IOException {
-      assert childTarget >= parentBits.length() || !parentBits.get(childTarget);
       
       //System.out.println("Q.advance childTarget=" + childTarget);
       if (childTarget == NO_MORE_DOCS) {
@@ -287,6 +287,10 @@ public class ToChildBlockJoinQuery exten
         return childDoc = parentDoc = NO_MORE_DOCS;
       }
 
+      if (parentBits.get(childTarget)) {
+        throw new IllegalStateException(ILLEGAL_ADVANCE_ON_PARENT + childTarget);
+      }
+
       assert childDoc == -1 || childTarget != parentDoc: "childTarget=" + childTarget;
       if (childDoc == -1 || childTarget > parentDoc) {
         // Advance to new parent:
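
The hunk above turns a bare assert into an explicit IllegalStateException when advance() lands on a parent document, so the mis-use is reported even with assertions disabled. A hedged sketch of a legal combination, where any extra filter matches child documents only; the field names and index setup are assumptions:

    Query parentQuery = new TermQuery(new Term("type", "parent"));
    BitDocIdSetFilter parentsFilter =
        new BitDocIdSetCachingWrapperFilter(new QueryWrapperFilter(parentQuery));
    ToChildBlockJoinQuery toChildren =
        new ToChildBlockJoinQuery(parentQuery, parentsFilter, false);
    // The filter passed to search() must match only child docs; a filter that also
    // matches a parent doc now triggers the IllegalStateException added above.
    Filter childOnlyFilter = new QueryWrapperFilter(new TermQuery(new Term("type", "child")));
    TopDocs hits = searcher.search(toChildren, childOnlyFilter, 10);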

Modified: lucene/dev/branches/lucene6005/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoinValidation.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoinValidation.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoinValidation.java (original)
+++ lucene/dev/branches/lucene6005/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoinValidation.java Sun Jan  4 14:53:12 2015
@@ -29,6 +29,7 @@ import org.apache.lucene.index.IndexWrit
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.QueryWrapperFilter;
@@ -112,6 +113,17 @@ public class TestBlockJoinValidation ext
   }
 
   @Test
+  public void testValidationForToChildBjqWithChildFilterQuery() throws Exception {
+    Query parentQueryWithRandomChild = createParentQuery();
+
+    ToChildBlockJoinQuery blockJoinQuery = new ToChildBlockJoinQuery(parentQueryWithRandomChild, parentsFilter, false);
+    Filter childFilter = new QueryWrapperFilter(new TermQuery(new Term("common_field", "1")));
+    thrown.expect(IllegalStateException.class);
+    thrown.expectMessage(ToChildBlockJoinQuery.ILLEGAL_ADVANCE_ON_PARENT);
+    indexSearcher.search(blockJoinQuery, childFilter, 1);
+  }
+
+  @Test
   public void testAdvanceValidationForToChildBjq() throws Exception {
     int randomChildNumber = getRandomChildNumber(0);
     // we need to make advance method meet wrong document, so random child number
@@ -162,6 +174,7 @@ public class TestBlockJoinValidation ext
     Document result = w.newDocument();
     result.addAtom("id", createFieldValue(segmentNumber * AMOUNT_OF_PARENT_DOCS + parentNumber));
     result.addAtom("parent", createFieldValue(parentNumber));
+    result.addAtom("common_field", "1");
     return result;
   }
 
@@ -169,6 +182,7 @@ public class TestBlockJoinValidation ext
     Document result = w.newDocument();
     result.addAtom("id", createFieldValue(segmentNumber * AMOUNT_OF_PARENT_DOCS + parentNumber, childNumber));
     result.addAtom("child", createFieldValue(childNumber));
+    result.addAtom("common_field", "1");
     return result;
   }
 
@@ -200,6 +214,10 @@ public class TestBlockJoinValidation ext
     return childQueryWithRandomParent;
   }
 
+  private static Query createParentQuery() {
+    return new TermQuery(new Term("id", createFieldValue(getRandomParentId())));
+  }
+
   private static int getRandomParentId() {
     return random().nextInt(AMOUNT_OF_PARENT_DOCS * AMOUNT_OF_SEGMENTS);
   }

Copied: lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/index/MergeReaderWrapper.java (from r1649173, lucene/dev/trunk/lucene/misc/src/java/org/apache/lucene/index/MergeReaderWrapper.java)
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/index/MergeReaderWrapper.java?p2=lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/index/MergeReaderWrapper.java&p1=lucene/dev/trunk/lucene/misc/src/java/org/apache/lucene/index/MergeReaderWrapper.java&r1=1649173&r2=1649347&rev=1649347&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/misc/src/java/org/apache/lucene/index/MergeReaderWrapper.java (original)
+++ lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/index/MergeReaderWrapper.java Sun Jan  4 14:53:12 2015
@@ -24,6 +24,7 @@ import org.apache.lucene.codecs.FieldsPr
 import org.apache.lucene.codecs.NormsProducer;
 import org.apache.lucene.codecs.StoredFieldsReader;
 import org.apache.lucene.codecs.TermVectorsReader;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.util.Bits;
 
 /** this is a hack to make SortingMP fast! */
@@ -85,6 +86,11 @@ class MergeReaderWrapper extends LeafRea
   }
 
   @Override
+  public FieldTypes getFieldTypes() {
+    return in.getFieldTypes();
+  }
+
+  @Override
   public NumericDocValues getNumericDocValues(String field) throws IOException {
     ensureOpen();
     FieldInfo fi = getFieldInfos().fieldInfo(field);

Modified: lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/index/package.html
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/index/package.html?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/index/package.html (original)
+++ lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/index/package.html Sun Jan  4 14:53:12 2015
@@ -17,5 +17,23 @@
 <html>
 <body>
 Misc index tools and index support.
+
+SortingMergePolicy:
+<p>Provides index sorting capabilities. The application can use any
+Sort specification, e.g. to sort by fields using DocValues or FieldCache, or to
+reverse the order of the documents (by using SortField.Type.DOC in reverse).
+Multi-level sorts can be specified the same way you would when searching, by
+building Sort from multiple SortFields.
+
+<p>{@link org.apache.lucene.index.SortingMergePolicy} can be used to
+make Lucene sort segments before merging them. This will ensure that every
+segment resulting from a merge will be sorted according to the provided
+{@link org.apache.lucene.search.Sort}. This however makes merging and
+thus indexing slower.
+
+<p>Sorted segments allow for early query termination when the sort order
+matches index order. This makes query execution faster since not all documents
+need to be visited. Please note that this is an expert feature and should not
+be used without a deep understanding of Lucene merging and document collection.
 </body>
 </html>
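
For the package documentation added above, a minimal configuration sketch; the sort field, its type, and the analyzer are assumptions, not part of this commit:

    Sort indexSort = new Sort(new SortField("timestamp", SortField.Type.LONG));
    IndexWriterConfig iwc = new IndexWriterConfig(new StandardAnalyzer());
    // Wrap the default merge policy so every merged segment comes out sorted:
    iwc.setMergePolicy(new SortingMergePolicy(iwc.getMergePolicy(), indexSort));
    IndexWriter writer = new IndexWriter(dir, iwc);
    // Searches sorted by the same Sort can then terminate early (e.g. via
    // EarlyTerminatingSortingCollector), since segment order matches sort order.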

Modified: lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/uninverting/DocTermOrds.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/uninverting/DocTermOrds.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/uninverting/DocTermOrds.java (original)
+++ lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/uninverting/DocTermOrds.java Sun Jan  4 14:53:12 2015
@@ -96,7 +96,7 @@ import org.apache.lucene.util.StringHelp
  *
  *   There are actually 256 byte arrays, to compensate for the fact that the pointers
  *   into the byte arrays are only 3 bytes long.  The correct byte array for a document
- *   is a function of it's id.
+ *   is a function of its id.
  *
  *   To save space and speed up faceting, any term that matches enough documents will
  *   not be un-inverted... it will be skipped while building the un-inverted field structure,
@@ -105,7 +105,7 @@ import org.apache.lucene.util.StringHelp
  *   To further save memory, the terms (the actual string values) are not all stored in
  *   memory, but a TermIndex is used to convert term numbers to term values only
  *   for the terms needed after faceting has completed.  Only every 128th term value
- *   is stored, along with it's corresponding term number, and this is used as an
+ *   is stored, along with its corresponding term number, and this is used as an
  *   index to find the closest term and iterate until the desired number is hit (very
  *   much like Lucene's own internal term index).
  *
@@ -314,7 +314,7 @@ public class DocTermOrds implements Acco
     //
     // During this intermediate form, every document has a (potential) byte[]
     // and the int[maxDoc()] array either contains the termNumber list directly
-    // or the *end* offset of the termNumber list in it's byte array (for faster
+    // or the *end* offset of the termNumber list in its byte array (for faster
     // appending and faster creation of the final form).
     //
     // idea... if things are too large while building, we could do a range of docs
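
The comments above describe the intermediate un-inverted form. For consumers, a hedged sketch of reaching the same multi-valued ordinals through the public uninverting API rather than DocTermOrds directly; the field name and reader setup are assumptions:

    Map<String,UninvertingReader.Type> mapping = new HashMap<>();
    mapping.put("tags", UninvertingReader.Type.SORTED_SET_BINARY);
    DirectoryReader wrapped = UninvertingReader.wrap(DirectoryReader.open(dir), mapping);
    LeafReader leaf = wrapped.leaves().get(0).reader();
    SortedSetDocValues ords = leaf.getSortedSetDocValues("tags");  // un-inverted on demand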

Modified: lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCacheImpl.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCacheImpl.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCacheImpl.java (original)
+++ lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCacheImpl.java Sun Jan  4 14:53:12 2015
@@ -20,6 +20,7 @@ package org.apache.lucene.uninverting;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
@@ -603,12 +604,12 @@ class FieldCacheImpl implements FieldCac
     }
     
     @Override
-    public Iterable<? extends Accountable> getChildResources() {
+    public Collection<Accountable> getChildResources() {
       List<Accountable> resources = new ArrayList<>();
       resources.add(Accountables.namedAccountable("term bytes", bytes));
       resources.add(Accountables.namedAccountable("ord -> term", termOrdToBytesOffset));
       resources.add(Accountables.namedAccountable("doc -> ord", docToTermOrd));
-      return resources;
+      return Collections.unmodifiableList(resources);
     }
   }
 
@@ -746,7 +747,7 @@ class FieldCacheImpl implements FieldCac
     }
 
     @Override
-    public Iterable<? extends Accountable> getChildResources() {
+    public Collection<Accountable> getChildResources() {
       List<Accountable> resources = new ArrayList<>();
       resources.add(Accountables.namedAccountable("term bytes", bytes));
       resources.add(Accountables.namedAccountable("addresses", docToOffset));
@@ -911,8 +912,8 @@ class FieldCacheImpl implements FieldCac
       return DocValues.emptySortedSet();
     } else {
       // if #postings = #docswithfield we know that the field is "single valued enough".
-      // its possible the same term might appear twice in the same document, but SORTED_SET discards frequency.
-      // its still ok with filtering (which we limit to numerics), it just means precisionStep = Inf
+      // it's possible the same term might appear twice in the same document, but SORTED_SET discards frequency.
+      // it's still ok with filtering (which we limit to numerics), it just means precisionStep = Inf
       long numPostings = terms.getSumDocFreq();
       if (numPostings != -1 && numPostings == terms.getDocCount()) {
         return DocValues.singleton(getTermsIndex(reader, field));

Modified: lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCacheSanityChecker.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCacheSanityChecker.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCacheSanityChecker.java (original)
+++ lucene/dev/branches/lucene6005/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCacheSanityChecker.java Sun Jan  4 14:53:12 2015
@@ -348,7 +348,7 @@ final class FieldCacheSanityChecker {
     public CacheEntry[] getCacheEntries() { return entries; }
     /**
      * Multi-Line representation of this Insanity object, starting with 
-     * the Type and Msg, followed by each CacheEntry.toString() on it's 
+     * the Type and Msg, followed by each CacheEntry.toString() on its 
      * own line prefaced by a tab character
      */
     @Override

Copied: lucene/dev/branches/lucene6005/lucene/misc/src/test/org/apache/lucene/index/IndexSortingTest.java (from r1649173, lucene/dev/trunk/lucene/misc/src/test/org/apache/lucene/index/IndexSortingTest.java)
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/misc/src/test/org/apache/lucene/index/IndexSortingTest.java?p2=lucene/dev/branches/lucene6005/lucene/misc/src/test/org/apache/lucene/index/IndexSortingTest.java&p1=lucene/dev/trunk/lucene/misc/src/test/org/apache/lucene/index/IndexSortingTest.java&r1=1649173&r2=1649347&rev=1649347&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/misc/src/test/org/apache/lucene/index/IndexSortingTest.java (original)
+++ lucene/dev/branches/lucene6005/lucene/misc/src/test/org/apache/lucene/index/IndexSortingTest.java Sun Jan  4 14:53:12 2015
@@ -49,7 +49,7 @@ public class IndexSortingTest extends So
     List<Integer> values = new ArrayList<>();
     for (int i = 0; i < unsortedReader.maxDoc(); i++) {
       if (liveDocs == null || liveDocs.get(i)) {
-        values.add(Integer.valueOf(unsortedReader.document(i).get(ID_FIELD)));
+        values.add(Integer.valueOf(unsortedReader.document(i).getString(ID_FIELD)));
       }
     }
     int idx = random().nextInt(SORT.length);

Copied: lucene/dev/branches/lucene6005/lucene/misc/src/test/org/apache/lucene/index/SorterTestBase.java (from r1649173, lucene/dev/trunk/lucene/misc/src/test/org/apache/lucene/index/SorterTestBase.java)
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/misc/src/test/org/apache/lucene/index/SorterTestBase.java?p2=lucene/dev/branches/lucene6005/lucene/misc/src/test/org/apache/lucene/index/SorterTestBase.java&p1=lucene/dev/trunk/lucene/misc/src/test/org/apache/lucene/index/SorterTestBase.java&r1=1649173&r2=1649347&rev=1649347&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/misc/src/test/org/apache/lucene/index/SorterTestBase.java (original)
+++ lucene/dev/branches/lucene6005/lucene/misc/src/test/org/apache/lucene/index/SorterTestBase.java Sun Jan  4 14:53:12 2015
@@ -29,23 +29,13 @@ import org.apache.lucene.analysis.TokenS
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
-import org.apache.lucene.document.BinaryDocValuesField;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.document.SortedNumericDocValuesField;
-import org.apache.lucene.document.SortedSetDocValuesField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.FieldInvertState;
-import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
@@ -55,12 +45,12 @@ import org.apache.lucene.index.SlowCompo
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.SortedNumericDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.index.SortingLeafReader.SortingDocsAndPositionsEnum;
+import org.apache.lucene.index.SortingLeafReader.SortingDocsEnum;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum.SeekStatus;
 import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.index.SortingLeafReader.SortingDocsAndPositionsEnum;
-import org.apache.lucene.index.SortingLeafReader.SortingDocsEnum;
 import org.apache.lucene.search.CollectionStatistics;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.TermStatistics;
@@ -153,41 +143,33 @@ public abstract class SorterTestBase ext
   protected static final String SORTED_DV_FIELD = "sorted";
   protected static final String SORTED_SET_DV_FIELD = "sorted_set";
   protected static final String TERM_VECTORS_FIELD = "term_vectors";
-
-  private static final FieldType TERM_VECTORS_TYPE = new FieldType(TextField.TYPE_NOT_STORED);
-  static {
-    TERM_VECTORS_TYPE.setStoreTermVectors(true);
-    TERM_VECTORS_TYPE.freeze();
-  }
-  
-  private static final FieldType POSITIONS_TYPE = new FieldType(TextField.TYPE_NOT_STORED);
-  static {
-    POSITIONS_TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    POSITIONS_TYPE.freeze();
-  }
   
   protected static Directory dir;
   protected static LeafReader unsortedReader;
   protected static LeafReader sortedReader;
   protected static Integer[] sortedValues;
 
-  private static Document doc(final int id, PositionsTokenStream positions) {
-    final Document doc = new Document();
-    doc.add(new StringField(ID_FIELD, Integer.toString(id), Store.YES));
-    doc.add(new StringField(DOCS_ENUM_FIELD, DOCS_ENUM_TERM, Store.NO));
+  private static Document doc(RandomIndexWriter w, final int id, PositionsTokenStream positions) {
+    FieldTypes fieldTypes = w.getFieldTypes();
+    fieldTypes.disableSorting(BINARY_DV_FIELD);
+    fieldTypes.setMultiValued(SORTED_SET_DV_FIELD);
+    fieldTypes.setMultiValued(SORTED_NUMERIC_DV_FIELD);
+    fieldTypes.enableTermVectors(TERM_VECTORS_FIELD);
+
+    final Document doc = w.newDocument();
+    doc.addAtom(ID_FIELD, Integer.toString(id));
+    doc.addAtom(DOCS_ENUM_FIELD, DOCS_ENUM_TERM);
     positions.setId(id);
-    doc.add(new Field(DOC_POSITIONS_FIELD, positions, POSITIONS_TYPE));
-    doc.add(new NumericDocValuesField(NUMERIC_DV_FIELD, id));
-    TextField norms = new TextField(NORMS_FIELD, Integer.toString(id), Store.NO);
-    norms.setBoost(Float.intBitsToFloat(id));
-    doc.add(norms);
-    doc.add(new BinaryDocValuesField(BINARY_DV_FIELD, new BytesRef(Integer.toString(id))));
-    doc.add(new SortedDocValuesField(SORTED_DV_FIELD, new BytesRef(Integer.toString(id))));
-    doc.add(new SortedSetDocValuesField(SORTED_SET_DV_FIELD, new BytesRef(Integer.toString(id))));
-    doc.add(new SortedSetDocValuesField(SORTED_SET_DV_FIELD, new BytesRef(Integer.toString(id + 1))));
-    doc.add(new SortedNumericDocValuesField(SORTED_NUMERIC_DV_FIELD, id));
-    doc.add(new SortedNumericDocValuesField(SORTED_NUMERIC_DV_FIELD, id + 1));
-    doc.add(new Field(TERM_VECTORS_FIELD, Integer.toString(id), TERM_VECTORS_TYPE));
+    doc.addLargeText(DOC_POSITIONS_FIELD, positions);
+    doc.addInt(NUMERIC_DV_FIELD, id);
+    doc.addLargeText(NORMS_FIELD, Integer.toString(id), Float.intBitsToFloat(id));
+    doc.addBinary(BINARY_DV_FIELD, new BytesRef(Integer.toString(id)));
+    doc.addAtom(SORTED_DV_FIELD, new BytesRef(Integer.toString(id)));
+    doc.addAtom(SORTED_SET_DV_FIELD, new BytesRef(Integer.toString(id)));
+    doc.addAtom(SORTED_SET_DV_FIELD, new BytesRef(Integer.toString(id + 1)));
+    doc.addInt(SORTED_NUMERIC_DV_FIELD, id);
+    doc.addInt(SORTED_NUMERIC_DV_FIELD, id + 1);
+    doc.addLargeText(TERM_VECTORS_FIELD, Integer.toString(id));
     return doc;
   }
 
@@ -210,7 +192,7 @@ public abstract class SorterTestBase ext
     RandomIndexWriter writer = new RandomIndexWriter(random, dir, conf);
     writer.setDoRandomForceMerge(false);
     for (int id : ids) {
-      writer.addDocument(doc(id, positions));
+      writer.addDocument(doc(writer, id, positions));
     }
     // delete some documents
     writer.commit();
@@ -321,7 +303,7 @@ public abstract class SorterTestBase ext
     int prev = -1;
     while ((doc = docs.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
       assertTrue("document " + doc + " marked as deleted", mappedLiveDocs == null || mappedLiveDocs.get(doc));
-      assertEquals("incorrect value; doc " + doc, sortedValues[doc].intValue(), Integer.parseInt(sortedReader.document(doc).get(ID_FIELD)));
+      assertEquals("incorrect value; doc " + doc, sortedValues[doc].intValue(), Integer.parseInt(sortedReader.document(doc).getString(ID_FIELD)));
       while (++prev < doc) {
         assertFalse("document " + prev + " not marked as deleted", mappedLiveDocs == null || mappedLiveDocs.get(prev));
       }
@@ -339,7 +321,7 @@ public abstract class SorterTestBase ext
     prev = -1;
     while ((doc = docs.advance(doc + 1)) != DocIdSetIterator.NO_MORE_DOCS) {
       assertTrue("document " + doc + " marked as deleted", mappedLiveDocs == null || mappedLiveDocs.get(doc));
-      assertEquals("incorrect value; doc " + doc, sortedValues[doc].intValue(), Integer.parseInt(sortedReader.document(doc).get(ID_FIELD)));
+      assertEquals("incorrect value; doc " + doc, sortedValues[doc].intValue(), Integer.parseInt(sortedReader.document(doc).getString(ID_FIELD)));
       while (++prev < doc) {
         assertFalse("document " + prev + " not marked as deleted", mappedLiveDocs == null || mappedLiveDocs.get(prev));
       }

Copied: lucene/dev/branches/lucene6005/lucene/misc/src/test/org/apache/lucene/index/TestBlockJoinSorter.java (from r1649173, lucene/dev/trunk/lucene/misc/src/test/org/apache/lucene/index/TestBlockJoinSorter.java)
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/misc/src/test/org/apache/lucene/index/TestBlockJoinSorter.java?p2=lucene/dev/branches/lucene6005/lucene/misc/src/test/org/apache/lucene/index/TestBlockJoinSorter.java&p1=lucene/dev/trunk/lucene/misc/src/test/org/apache/lucene/index/TestBlockJoinSorter.java&r1=1649173&r2=1649347&rev=1649347&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/misc/src/test/org/apache/lucene/index/TestBlockJoinSorter.java (original)
+++ lucene/dev/branches/lucene6005/lucene/misc/src/test/org/apache/lucene/index/TestBlockJoinSorter.java Sun Jan  4 14:53:12 2015
@@ -23,12 +23,9 @@ import java.util.List;
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
@@ -70,20 +67,17 @@ public class TestBlockJoinSorter extends
     IndexWriterConfig cfg = newIndexWriterConfig(new MockAnalyzer(random()));
     cfg.setMergePolicy(newLogMergePolicy());
     final RandomIndexWriter writer = new RandomIndexWriter(random(), newDirectory(), cfg);
-    final Document parentDoc = new Document();
-    final NumericDocValuesField parentVal = new NumericDocValuesField("parent_val", 0L);
-    parentDoc.add(parentVal);
-    final StringField parent = new StringField("parent", "true", Store.YES);
-    parentDoc.add(parent);
     for (int i = 0; i < numParents; ++i) {
       List<Document> documents = new ArrayList<>();
       final int numChildren = random().nextInt(10);
       for (int j = 0; j < numChildren; ++j) {
-        final Document childDoc = new Document();
-        childDoc.add(new NumericDocValuesField("child_val", random().nextInt(5)));
+        final Document childDoc = writer.newDocument();
+        childDoc.addInt("child_val", random().nextInt(5));
         documents.add(childDoc);
       }
-      parentVal.setLongValue(random().nextInt(50));
+      final Document parentDoc = writer.newDocument();
+      parentDoc.addLong("parent_val", random().nextInt(50));
+      parentDoc.addAtom("parent", "true");
       documents.add(parentDoc);
       writer.addDocuments(documents);
     }

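The reworked loop keeps the block-join convention that the parent document is appended last before addDocuments; a compact sketch of that pattern with the same API (counts and values are illustrative):

    // Children first, parent last; addDocuments writes the block contiguously.
    List<Document> block = new ArrayList<>();
    for (int j = 0; j < 3; j++) {
      Document child = writer.newDocument();
      child.addInt("child_val", j);
      block.add(child);
    }
    Document parent = writer.newDocument();
    parent.addLong("parent_val", 7L);
    parent.addAtom("parent", "true");
    block.add(parent);              // the parent must be the last document in the block
    writer.addDocuments(block);
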
Copied: lucene/dev/branches/lucene6005/lucene/misc/src/test/org/apache/lucene/index/TestSortingMergePolicy.java (from r1649173, lucene/dev/trunk/lucene/misc/src/test/org/apache/lucene/index/TestSortingMergePolicy.java)
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/misc/src/test/org/apache/lucene/index/TestSortingMergePolicy.java?p2=lucene/dev/branches/lucene6005/lucene/misc/src/test/org/apache/lucene/index/TestSortingMergePolicy.java&p1=lucene/dev/trunk/lucene/misc/src/test/org/apache/lucene/index/TestSortingMergePolicy.java&r1=1649173&r2=1649347&rev=1649347&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/misc/src/test/org/apache/lucene/index/TestSortingMergePolicy.java (original)
+++ lucene/dev/branches/lucene6005/lucene/misc/src/test/org/apache/lucene/index/TestSortingMergePolicy.java Sun Jan  4 14:53:12 2015
@@ -26,13 +26,14 @@ import java.util.Set;
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.StringField;
-import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.document.FieldTypes;
 import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.DocValuesType;
+import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LogMergePolicy;
 import org.apache.lucene.index.MergePolicy;
 import org.apache.lucene.index.NumericDocValues;
@@ -45,7 +46,6 @@ import org.apache.lucene.search.SortFiel
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
-
 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
 
 public class TestSortingMergePolicy extends LuceneTestCase {
@@ -63,11 +63,34 @@ public class TestSortingMergePolicy exte
     createRandomIndexes();
   }
 
-  private Document randomDocument() {
-    final Document doc = new Document();
-    doc.add(new NumericDocValuesField("ndv", random().nextLong()));
-    doc.add(new StringField("s", RandomPicks.randomFrom(random(), terms), Store.YES));
-    return doc;
+  private void addRandomDocument(RandomIndexWriter w1, RandomIndexWriter w2) throws IOException {
+    long num = random().nextLong();
+    String term = RandomPicks.randomFrom(random(), terms);
+
+    Document doc = w1.newDocument();
+    doc.addLong("ndv", num);
+    doc.addAtom("s", term);
+    w1.addDocument(doc);
+
+    doc = w2.newDocument();
+    doc.addLong("ndv", num);
+    doc.addAtom("s", term);
+    w2.addDocument(doc);
+  }
+
+  private void addRandomDocument(IndexWriter w1, IndexWriter w2) throws IOException {
+    long num = random().nextLong();
+    String term = RandomPicks.randomFrom(random(), terms);
+
+    Document doc = w1.newDocument();
+    doc.addLong("ndv", num);
+    doc.addAtom("s", term);
+    w1.addDocument(doc);
+
+    doc = w2.newDocument();
+    doc.addLong("ndv", num);
+    doc.addAtom("s", term);
+    w2.addDocument(doc);
   }
 
   public static SortingMergePolicy newSortingMergePolicy(Sort sort) {
@@ -107,16 +130,22 @@ public class TestSortingMergePolicy exte
     final IndexWriterConfig iwc2 = newIndexWriterConfig(new MockAnalyzer(new Random(seed)));
     iwc2.setMergePolicy(newSortingMergePolicy(sort));
     final RandomIndexWriter iw1 = new RandomIndexWriter(new Random(seed), dir1, iwc1);
+    FieldTypes fieldTypes = iw1.getFieldTypes();
+    fieldTypes.setIndexOptions("ndv", IndexOptions.NONE);
+    fieldTypes.disableStored("ndv");
+    fieldTypes.setDocValuesType("s", DocValuesType.NONE);
     final RandomIndexWriter iw2 = new RandomIndexWriter(new Random(seed), dir2, iwc2);
+    fieldTypes = iw2.getFieldTypes();
+    fieldTypes.setIndexOptions("ndv", IndexOptions.NONE);
+    fieldTypes.disableStored("ndv");
+    fieldTypes.setDocValuesType("s", DocValuesType.NONE);
     for (int i = 0; i < numDocs; ++i) {
       if (random().nextInt(5) == 0 && i != numDocs - 1) {
         final String term = RandomPicks.randomFrom(random(), terms);
         iw1.deleteDocuments(new Term("s", term));
         iw2.deleteDocuments(new Term("s", term));
       }
-      final Document doc = randomDocument();
-      iw1.addDocument(doc);
-      iw2.addDocument(doc);
+      addRandomDocument(iw1, iw2);
       if (random().nextInt(8) == 0) {
         iw1.commit();
         iw2.commit();
@@ -125,15 +154,13 @@ public class TestSortingMergePolicy exte
     // Make sure we have something to merge
     iw1.commit();
     iw2.commit();
-    final Document doc = randomDocument();
     // NOTE: don't use RIW.addDocument directly, since it sometimes commits
     // which may trigger a merge, in which case forceMerge may not do anything.
     // With field updates this is a problem, since the updates can go into the
     // single segment in the index, and therefore the index won't be sorted.
     // This hurts the assumption of the test later on, that the index is sorted
     // by SortingMP.
-    iw1.w.addDocument(doc);
-    iw2.w.addDocument(doc);
+    addRandomDocument(iw1.w, iw2.w);
 
     // update NDV of docs belonging to one term (covers many documents)
     final long value = random().nextLong();

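The FieldTypes calls added above pin down the per-field schema before any documents are indexed; a short sketch of those knobs, using only the calls that appear in the hunk:

    // FieldTypes comes from the writer and must be configured before the
    // affected fields are first used in a document.
    FieldTypes fieldTypes = writer.getFieldTypes();
    fieldTypes.setIndexOptions("ndv", IndexOptions.NONE);    // doc-values-only field: no postings
    fieldTypes.disableStored("ndv");                         // and no stored value
    fieldTypes.setDocValuesType("s", DocValuesType.NONE);    // keyword field without doc values
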
Copied: lucene/dev/branches/lucene6005/lucene/misc/src/test/org/apache/lucene/search/TestEarlyTerminatingSortingCollector.java (from r1649173, lucene/dev/trunk/lucene/misc/src/test/org/apache/lucene/search/TestEarlyTerminatingSortingCollector.java)
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/misc/src/test/org/apache/lucene/search/TestEarlyTerminatingSortingCollector.java?p2=lucene/dev/branches/lucene6005/lucene/misc/src/test/org/apache/lucene/search/TestEarlyTerminatingSortingCollector.java&p1=lucene/dev/trunk/lucene/misc/src/test/org/apache/lucene/search/TestEarlyTerminatingSortingCollector.java&r1=1649173&r2=1649347&rev=1649347&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/misc/src/test/org/apache/lucene/search/TestEarlyTerminatingSortingCollector.java (original)
+++ lucene/dev/branches/lucene6005/lucene/misc/src/test/org/apache/lucene/search/TestEarlyTerminatingSortingCollector.java Sun Jan  4 14:53:12 2015
@@ -26,9 +26,6 @@ import java.util.Set;
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
@@ -68,11 +65,11 @@ public class TestEarlyTerminatingSorting
     sort = new Sort(new SortField("ndv1", SortField.Type.LONG));
   }
 
-  private Document randomDocument() {
-    final Document doc = new Document();
-    doc.add(new NumericDocValuesField("ndv1", random().nextInt(10)));
-    doc.add(new NumericDocValuesField("ndv2", random().nextInt(10)));
-    doc.add(new StringField("s", RandomPicks.randomFrom(random(), terms), Store.YES));
+  private Document randomDocument(RandomIndexWriter iw) {
+    final Document doc = iw.newDocument();
+    doc.addInt("ndv1", random().nextInt(10));
+    doc.addInt("ndv2", random().nextInt(10));
+    doc.addAtom("s", RandomPicks.randomFrom(random(), terms));
     return doc;
   }
 
@@ -93,7 +90,7 @@ public class TestEarlyTerminatingSorting
     iw = new RandomIndexWriter(new Random(seed), dir, iwc);
     iw.setDoRandomForceMerge(false); // don't do this, it may happen anyway with MockRandomMP
     for (int i = 0; i < numDocs; ++i) {
-      final Document doc = randomDocument();
+      final Document doc = randomDocument(iw);
       iw.addDocument(doc);
       if (i == numDocs / 2 || (i != numDocs - 1 && random().nextInt(8) == 0)) {
         iw.commit();

Modified: lucene/dev/branches/lucene6005/lucene/module-build.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/module-build.xml?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/module-build.xml (original)
+++ lucene/dev/branches/lucene6005/lucene/module-build.xml Sun Jan  4 14:53:12 2015
@@ -68,7 +68,7 @@
   <macrodef name="invoke-module-javadoc">
     <!-- additional links for dependencies to other modules -->
       <element name="links" optional="yes"/>
-    <!-- link source (don't do this unless its example code) -->
+    <!-- link source (don't do this unless it's example code) -->
       <attribute name="linksource" default="no"/>
     <sequential>
       <mkdir dir="${javadoc.dir}/${name}"/>

Modified: lucene/dev/branches/lucene6005/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/MaxFloatFunction.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/MaxFloatFunction.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/MaxFloatFunction.java (original)
+++ lucene/dev/branches/lucene6005/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/MaxFloatFunction.java Sun Jan  4 14:53:12 2015
@@ -21,7 +21,7 @@ import org.apache.lucene.queries.functio
 import org.apache.lucene.queries.function.ValueSource;
 
 /**
- * <code>MaxFloatFunction</code> returns the max of it's components.
+ * <code>MaxFloatFunction</code> returns the max of its components.
  */
 public class MaxFloatFunction extends MultiFloatFunction {
   public MaxFloatFunction(ValueSource[] sources) {

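A small usage sketch of the composition the javadoc describes; FloatFieldSource and FunctionQuery are existing queries-module classes, and the field names are illustrative:

    // Scores each document by the larger of two float-valued fields.
    ValueSource max = new MaxFloatFunction(new ValueSource[] {
        new FloatFieldSource("popularity"),
        new FloatFieldSource("editorial_boost")
    });
    Query q = new FunctionQuery(max);
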
Modified: lucene/dev/branches/lucene6005/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/MinFloatFunction.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/MinFloatFunction.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/MinFloatFunction.java (original)
+++ lucene/dev/branches/lucene6005/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/MinFloatFunction.java Sun Jan  4 14:53:12 2015
@@ -21,7 +21,7 @@ import org.apache.lucene.queries.functio
 import org.apache.lucene.queries.function.ValueSource;
 
 /**
- * <code>MinFloatFunction</code> returns the min of it's components.
+ * <code>MinFloatFunction</code> returns the min of its components.
  */
 public class MinFloatFunction extends MultiFloatFunction {
   public MinFloatFunction(ValueSource[] sources) {

Modified: lucene/dev/branches/lucene6005/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/ProductFloatFunction.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/ProductFloatFunction.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/ProductFloatFunction.java (original)
+++ lucene/dev/branches/lucene6005/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/ProductFloatFunction.java Sun Jan  4 14:53:12 2015
@@ -21,7 +21,7 @@ import org.apache.lucene.queries.functio
 import org.apache.lucene.queries.function.ValueSource;
 
 /**
- * <code>ProductFloatFunction</code> returns the product of it's components.
+ * <code>ProductFloatFunction</code> returns the product of its components.
  */
 public class ProductFloatFunction extends MultiFloatFunction {
   public ProductFloatFunction(ValueSource[] sources) {

Modified: lucene/dev/branches/lucene6005/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/SumFloatFunction.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/SumFloatFunction.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/SumFloatFunction.java (original)
+++ lucene/dev/branches/lucene6005/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/SumFloatFunction.java Sun Jan  4 14:53:12 2015
@@ -21,7 +21,7 @@ import org.apache.lucene.queries.functio
 import org.apache.lucene.queries.function.ValueSource;
 
 /**
- * <code>SumFloatFunction</code> returns the sum of it's components.
+ * <code>SumFloatFunction</code> returns the sum of its components.
  */
 public class SumFloatFunction extends MultiFloatFunction {
   public SumFloatFunction(ValueSource[] sources) {

Modified: lucene/dev/branches/lucene6005/lucene/queries/src/test/org/apache/lucene/queries/CommonTermsQueryTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/queries/src/test/org/apache/lucene/queries/CommonTermsQueryTest.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/queries/src/test/org/apache/lucene/queries/CommonTermsQueryTest.java (original)
+++ lucene/dev/branches/lucene6005/lucene/queries/src/test/org/apache/lucene/queries/CommonTermsQueryTest.java Sun Jan  4 14:53:12 2015
@@ -530,7 +530,7 @@ public class CommonTermsQueryTest extend
   public static void createRandomIndex(int numdocs, RandomIndexWriter writer,
       long seed) throws IOException {
     Random random = new Random(seed);
-    // primary source for our data is from linefiledocs, its realistic.
+    // primary source for our data is from linefiledocs, it's realistic.
     LineFileDocs lineFileDocs = new LineFileDocs(writer.w, random);
     
     // TODO: we should add other fields that use things like docs&freqs but omit

Modified: lucene/dev/branches/lucene6005/lucene/queries/src/test/org/apache/lucene/queries/function/TestValueSources.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/queries/src/test/org/apache/lucene/queries/function/TestValueSources.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/queries/src/test/org/apache/lucene/queries/function/TestValueSources.java (original)
+++ lucene/dev/branches/lucene6005/lucene/queries/src/test/org/apache/lucene/queries/function/TestValueSources.java Sun Jan  4 14:53:12 2015
@@ -299,7 +299,7 @@ public class TestValueSources extends Lu
       ValueSource vs = new NormValueSource("byte");
       assertHits(new FunctionQuery(vs), new float[] { 0f, 0f });
 
-      // regardless of wether norms exist, value source exists == 0
+      // regardless of whether norms exist, value source exists == 0
       assertAllExist(vs);
 
       vs = new NormValueSource("text");
@@ -444,7 +444,7 @@ public class TestValueSources extends Lu
     assertHits(new FunctionQuery(vs), new float[] { 0f, 1f });
     assertAllExist(vs);
                
-    // regardless of wether norms exist, value source exists == 0
+    // regardless of whether norms exist, value source exists == 0
     vs = new TermFreqValueSource("bogus", "bogus", "bogus", new BytesRef("bogus"));
     assertHits(new FunctionQuery(vs), new float[] { 0F, 0F });
     assertAllExist(vs);
@@ -465,7 +465,7 @@ public class TestValueSources extends Lu
       assertHits(new FunctionQuery(vs), new float[] { 0f, 1f });
       assertAllExist(vs);
       
-      // regardless of wether norms exist, value source exists == 0
+      // regardless of whether norms exist, value source exists == 0
       vs = new TFValueSource("bogus", "bogus", "bogus", new BytesRef("bogus"));
       assertHits(new FunctionQuery(vs), new float[] { 0F, 0F });
       assertAllExist(vs);

Modified: lucene/dev/branches/lucene6005/lucene/queryparser/docs/xml/LuceneContribQuery.dtd.html
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/queryparser/docs/xml/LuceneContribQuery.dtd.html?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/queryparser/docs/xml/LuceneContribQuery.dtd.html (original)
+++ lucene/dev/branches/lucene6005/lucene/queryparser/docs/xml/LuceneContribQuery.dtd.html Sun Jan  4 14:53:12 2015
@@ -219,7 +219,7 @@ Child of <a href='#BoostQuery'>BoostQuer
 <li> as a Clause in a BooleanQuery whose only other clause
 is a "mustNot" match (Lucene requires at least one positive clause) and..</li>
 <li> in a FilteredQuery where a Filter tag is effectively being
-used to select content rather than it's usual role of filtering the results of a query.</li>
+used to select content rather than its usual role of filtering the results of a query.</li>
 </ol></p><p><span class='inTextTitle'>Example:</span> <em>Effectively use a Filter as a query </em>
 </p><pre>	          
                &lt;FilteredQuery&gt;

Modified: lucene/dev/branches/lucene6005/lucene/queryparser/docs/xml/LuceneCoreQuery.dtd.html
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/queryparser/docs/xml/LuceneCoreQuery.dtd.html?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/queryparser/docs/xml/LuceneCoreQuery.dtd.html (original)
+++ lucene/dev/branches/lucene6005/lucene/queryparser/docs/xml/LuceneCoreQuery.dtd.html Sun Jan  4 14:53:12 2015
@@ -225,7 +225,7 @@ Child of <a href='#Clause'>Clause</a>, <
 <li> as a Clause in a BooleanQuery whose only other clause
 is a "mustNot" match (Lucene requires at least one positive clause) and..</li>
 <li> in a FilteredQuery where a Filter tag is effectively being
-used to select content rather than it's usual role of filtering the results of a query.</li>
+used to select content rather than its usual role of filtering the results of a query.</li>
 </ol></p><p><span class='inTextTitle'>Example:</span> <em>Effectively use a Filter as a query </em>
 </p><pre>	          
                &lt;FilteredQuery&gt;

Modified: lucene/dev/branches/lucene6005/lucene/queryparser/docs/xml/LuceneCoreQuery.dtd.org.html
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/queryparser/docs/xml/LuceneCoreQuery.dtd.org.html?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/queryparser/docs/xml/LuceneCoreQuery.dtd.org.html (original)
+++ lucene/dev/branches/lucene6005/lucene/queryparser/docs/xml/LuceneCoreQuery.dtd.org.html Sun Jan  4 14:53:12 2015
@@ -159,7 +159,7 @@
 <span class="dtd_comment">    &lt;li&gt; as a Clause in a BooleanQuery whose only other clause</span>
 <span class="dtd_comment">    is a &quot;mustNot&quot; match (Lucene requires at least one positive clause) and..&lt;/li&gt;</span>
 <span class="dtd_comment">    &lt;li&gt; in a FilteredQuery where a Filter tag is effectively being </span>
-<span class="dtd_comment">    used to select content rather than it's usual role of filtering the results of a query.&lt;/li&gt;</span>
+<span class="dtd_comment">    used to select content rather than its usual role of filtering the results of a query.&lt;/li&gt;</span>
 <span class="dtd_comment">    &lt;/ol&gt;</span>
 <span class="dtd_comment">    </span>
 <span class="dtd_comment">    </span><span class="dtd_dtddoc_tag">@example</span><span class="dtd_comment"> </span>

Modified: lucene/dev/branches/lucene6005/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AnalyzerQueryNodeProcessor.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AnalyzerQueryNodeProcessor.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AnalyzerQueryNodeProcessor.java (original)
+++ lucene/dev/branches/lucene6005/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AnalyzerQueryNodeProcessor.java Sun Jan  4 14:53:12 2015
@@ -130,9 +130,9 @@ public class AnalyzerQueryNodeProcessor
       
       try {
         try (TokenStream source = this.analyzer.tokenStream(field, text)) {
-          source.reset();
           buffer = new CachingTokenFilter(source);
-  
+          buffer.reset();
+
           if (buffer.hasAttribute(PositionIncrementAttribute.class)) {
             posIncrAtt = buffer.getAttribute(PositionIncrementAttribute.class);
           }
@@ -155,13 +155,13 @@ public class AnalyzerQueryNodeProcessor
           } catch (IOException e) {
             // ignore
           }
+
+          // rewind the buffer stream
+          buffer.reset(); // will never throw on subsequent reset calls
         } catch (IOException e) {
           throw new RuntimeException(e);
         }
-        
-        // rewind the buffer stream
-        buffer.reset();
-  
+
         if (!buffer.hasAttribute(CharTermAttribute.class)) {
           return new NoTokenFoundQueryNode();
         }

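With the reordering above, reset() is now called on the CachingTokenFilter rather than on the underlying source; a sketch of the resulting consumption pattern (the analyzer and field name are placeholders):

    try (TokenStream source = analyzer.tokenStream("field", "some query text")) {
      CachingTokenFilter buffer = new CachingTokenFilter(source);
      buffer.reset();                                   // first reset is forwarded to the source
      CharTermAttribute termAtt = buffer.addAttribute(CharTermAttribute.class);
      while (buffer.incrementToken()) {
        // consume termAtt; the filter caches the stream so it can be replayed
      }
      buffer.reset();                                   // later resets only rewind the cache
    }
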
Modified: lucene/dev/branches/lucene6005/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.java (original)
+++ lucene/dev/branches/lucene6005/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.java Sun Jan  4 14:53:12 2015
@@ -36,7 +36,7 @@ import org.apache.lucene.queryparser.sur
  *   c)</code> notation. </p>
  
  *  <p>The W and N operators express a positional relationship among their
- *  operands.  N is ordered, and W is unordered.  The distance is 1 by
+ *  operands.  W is ordered, and N is unordered.  The distance is 1 by
  *  default, meaning the operands are adjacent, or may be provided as a
  *  prefix from 2-99.  So, for example, 3W(a, b) means that terms a and b
  *  must appear within three positions of each other, or in other words, up

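Concrete queries make the corrected sentence easier to verify; a hedged sketch, assuming the surround parser's static parse() entry point:

    // "3W(a, b)": a must precede b, at most three positions apart (ordered).
    // "3N(a, b)": a and b within three positions of each other, in either order (unordered).
    SrndQuery ordered   = QueryParser.parse("3W(lucene, search)");
    SrndQuery unordered = QueryParser.parse("3N(lucene, search)");
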
Modified: lucene/dev/branches/lucene6005/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.jj
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.jj?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.jj (original)
+++ lucene/dev/branches/lucene6005/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.jj Sun Jan  4 14:53:12 2015
@@ -65,7 +65,7 @@ import org.apache.lucene.queryparser.sur
  *   c)</code> notation. </p>
  
  *  <p>The W and N operators express a positional relationship among their
- *  operands.  N is ordered, and W is unordered.  The distance is 1 by
+ *  operands.  W is ordered, and N is unordered.  The distance is 1 by
  *  default, meaning the operands are adjacent, or may be provided as a
  *  prefix from 2-99.  So, for example, 3W(a, b) means that terms a and b
  *  must appear within three positions of each other, or in other words, up

Modified: lucene/dev/branches/lucene6005/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SpanNearClauseFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SpanNearClauseFactory.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SpanNearClauseFactory.java (original)
+++ lucene/dev/branches/lucene6005/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SpanNearClauseFactory.java Sun Jan  4 14:53:12 2015
@@ -46,7 +46,7 @@ Operations:
    Are SpanQuery weights handled correctly during search by Lucene?
    Should the resulting SpanOrQuery be sorted?
    Could other SpanQueries be added for use in this factory:
-   - SpanOrQuery: in principle yes, but it only has access to it's terms
+   - SpanOrQuery: in principle yes, but it only has access to its terms
                   via getTerms(); are the corresponding weights available?
    - SpanFirstQuery: treat similar to subquery SpanNearQuery. (ok?)
    - SpanNotQuery: treat similar to subquery SpanNearQuery. (ok?)

Modified: lucene/dev/branches/lucene6005/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LikeThisQueryBuilder.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LikeThisQueryBuilder.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LikeThisQueryBuilder.java (original)
+++ lucene/dev/branches/lucene6005/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LikeThisQueryBuilder.java Sun Jan  4 14:53:12 2015
@@ -81,7 +81,6 @@ public class LikeThisQueryBuilder implem
             stopWordsSet.add(termAtt.toString());
           }
           ts.end();
-          ts.close();
         } catch (IOException ioe) {
           throw new ParserException("IoException parsing stop words list in "
               + getClass().getName() + ":" + ioe.getLocalizedMessage());

Modified: lucene/dev/branches/lucene6005/lucene/queryparser/src/resources/org/apache/lucene/queryparser/xml/LuceneCoreQuery.dtd
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/queryparser/src/resources/org/apache/lucene/queryparser/xml/LuceneCoreQuery.dtd?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/queryparser/src/resources/org/apache/lucene/queryparser/xml/LuceneCoreQuery.dtd (original)
+++ lucene/dev/branches/lucene6005/lucene/queryparser/src/resources/org/apache/lucene/queryparser/xml/LuceneCoreQuery.dtd Sun Jan  4 14:53:12 2015
@@ -153,7 +153,7 @@ Passes content directly through to the s
 	<li> as a Clause in a BooleanQuery whose only other clause
 	is a "mustNot" match (Lucene requires at least one positive clause) and..</li>
 	<li> in a FilteredQuery where a Filter tag is effectively being 
-	used to select content rather than it's usual role of filtering the results of a query.</li>
+	used to select content rather than its usual role of filtering the results of a query.</li>
 	</ol>
 	
 	@example 

Modified: lucene/dev/branches/lucene6005/lucene/replicator/src/java/org/apache/lucene/replicator/IndexAndTaxonomyReplicationHandler.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/replicator/src/java/org/apache/lucene/replicator/IndexAndTaxonomyReplicationHandler.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/replicator/src/java/org/apache/lucene/replicator/IndexAndTaxonomyReplicationHandler.java (original)
+++ lucene/dev/branches/lucene6005/lucene/replicator/src/java/org/apache/lucene/replicator/IndexAndTaxonomyReplicationHandler.java Sun Jan  4 14:53:12 2015
@@ -132,9 +132,9 @@ public class IndexAndTaxonomyReplication
       // reader sees a more advanced taxonomy than the index.
       
       if (taxoSegmentsFile != null) {
-        taxoClientDir.copy(taxoDir, taxoSegmentsFile, taxoPendingFile, IOContext.READONCE);
+        taxoDir.copyFrom(taxoClientDir, taxoSegmentsFile, taxoPendingFile, IOContext.READONCE);
       }
-      indexClientDir.copy(indexDir, indexSegmentsFile, indexPendingFile, IOContext.READONCE);
+      indexDir.copyFrom(indexClientDir, indexSegmentsFile, indexPendingFile, IOContext.READONCE);
       
       if (taxoSegmentsFile != null) {
         taxoDir.sync(Collections.singletonList(taxoPendingFile));

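The replication hunks reflect the Directory API change from source.copy(dest, ...) to dest.copyFrom(source, ...): the destination now pulls the file. A minimal sketch under that assumption, with illustrative paths and file names:

    Directory source = FSDirectory.open(Paths.get("/tmp/replica-download"));
    Directory dest   = FSDirectory.open(Paths.get("/tmp/index"));
    dest.copyFrom(source, "segments_1", "segments_1", IOContext.READONCE);
    dest.sync(Collections.singletonList("segments_1")); // fsync after copying, as the handler does
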
Modified: lucene/dev/branches/lucene6005/lucene/replicator/src/java/org/apache/lucene/replicator/IndexReplicationHandler.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/lucene6005/lucene/replicator/src/java/org/apache/lucene/replicator/IndexReplicationHandler.java?rev=1649347&r1=1649346&r2=1649347&view=diff
==============================================================================
--- lucene/dev/branches/lucene6005/lucene/replicator/src/java/org/apache/lucene/replicator/IndexReplicationHandler.java (original)
+++ lucene/dev/branches/lucene6005/lucene/replicator/src/java/org/apache/lucene/replicator/IndexReplicationHandler.java Sun Jan  4 14:53:12 2015
@@ -173,7 +173,7 @@ public class IndexReplicationHandler imp
   public static void copyFiles(Directory source, Directory target, List<String> files) throws IOException {
     if (!source.equals(target)) {
       for (String file : files) {
-        source.copy(target, file, file, IOContext.READONCE);
+        target.copyFrom(source, file, file, IOContext.READONCE);
       }
     }
   }
@@ -232,7 +232,7 @@ public class IndexReplicationHandler imp
       indexDir.sync(files);
       
       // now copy and fsync segmentsFile as pending, then rename (simulating lucene commit)
-      clientDir.copy(indexDir, segmentsFile, pendingSegmentsFile, IOContext.READONCE);
+      indexDir.copyFrom(clientDir, segmentsFile, pendingSegmentsFile, IOContext.READONCE);
       indexDir.sync(Collections.singletonList(pendingSegmentsFile));
       indexDir.renameFile(pendingSegmentsFile, segmentsFile);