Posted to commits@lucene.apache.org by yo...@apache.org on 2011/11/14 23:36:32 UTC

svn commit: r1201946 [7/14] - in /lucene/dev/branches/solrcloud: ./ dev-tools/idea/lucene/contrib/ lucene/ lucene/contrib/ lucene/contrib/demo/src/java/org/apache/lucene/demo/ lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/ luce...

Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanMultiTermQueryWrapper.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanMultiTermQueryWrapper.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanMultiTermQueryWrapper.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanMultiTermQueryWrapper.java Mon Nov 14 22:36:20 2011
@@ -18,6 +18,7 @@ package org.apache.lucene.search.spans;
  */
 
 import java.io.IOException;
+import java.util.Map;
 
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexReader.AtomicReaderContext;
@@ -90,7 +91,7 @@ public class SpanMultiTermQueryWrapper<Q
   }
   
   @Override
-  public Spans getSpans(AtomicReaderContext context, Bits acceptDocs) throws IOException {
+  public Spans getSpans(AtomicReaderContext context, Bits acceptDocs, Map<Term,TermContext> termContexts) throws IOException {
     throw new UnsupportedOperationException("Query should have been rewritten");
   }
 
@@ -157,6 +158,9 @@ public class SpanMultiTermQueryWrapper<Q
     
       @Override
       protected void addClause(SpanOrQuery topLevel, Term term, int docCount, float boost, TermContext states) {
+        // TODO: would be nice to not lose term-state here.
+        // we could add a hack option to SpanOrQuery, but the hack would only work if this is the top-level Span
+        // (if you put this thing in another span query, it would extractTerms/double-seek anyway)
         final SpanTermQuery q = new SpanTermQuery(term);
         q.setBoost(boost);
         topLevel.addClause(q);

Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanNearQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanNearQuery.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanNearQuery.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanNearQuery.java Mon Nov 14 22:36:20 2011
@@ -23,6 +23,7 @@ import java.io.IOException;
 import java.util.List;
 import java.util.ArrayList;
 import java.util.Iterator;
+import java.util.Map;
 import java.util.Set;
 
 
@@ -31,6 +32,7 @@ import org.apache.lucene.index.IndexRead
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.TermContext;
 import org.apache.lucene.util.ToStringUtils;
 
 /** Matches spans which are near one another.  One can specify <i>slop</i>, the
@@ -118,16 +120,16 @@ public class SpanNearQuery extends SpanQ
   }
 
   @Override
-  public Spans getSpans(final AtomicReaderContext context, Bits acceptDocs) throws IOException {
+  public Spans getSpans(final AtomicReaderContext context, Bits acceptDocs, Map<Term,TermContext> termContexts) throws IOException {
     if (clauses.size() == 0)                      // optimize 0-clause case
-      return new SpanOrQuery(getClauses()).getSpans(context, acceptDocs);
+      return new SpanOrQuery(getClauses()).getSpans(context, acceptDocs, termContexts);
 
     if (clauses.size() == 1)                      // optimize 1-clause case
-      return clauses.get(0).getSpans(context, acceptDocs);
+      return clauses.get(0).getSpans(context, acceptDocs, termContexts);
 
     return inOrder
-            ? (Spans) new NearSpansOrdered(this, context, acceptDocs, collectPayloads)
-            : (Spans) new NearSpansUnordered(this, context, acceptDocs);
+            ? (Spans) new NearSpansOrdered(this, context, acceptDocs, termContexts, collectPayloads)
+            : (Spans) new NearSpansUnordered(this, context, acceptDocs, termContexts);
   }
 
   @Override

Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanNotQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanNotQuery.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanNotQuery.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanNotQuery.java Mon Nov 14 22:36:20 2011
@@ -22,11 +22,13 @@ import org.apache.lucene.index.IndexRead
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.TermContext;
 import org.apache.lucene.util.ToStringUtils;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Map;
 import java.util.Set;
 
 /** Removes matches which overlap with another SpanQuery. */
@@ -76,12 +78,12 @@ public class SpanNotQuery extends SpanQu
   }
 
   @Override
-  public Spans getSpans(final AtomicReaderContext context, final Bits acceptDocs) throws IOException {
+  public Spans getSpans(final AtomicReaderContext context, final Bits acceptDocs, final Map<Term,TermContext> termContexts) throws IOException {
     return new Spans() {
-        private Spans includeSpans = include.getSpans(context, acceptDocs);
+        private Spans includeSpans = include.getSpans(context, acceptDocs, termContexts);
         private boolean moreInclude = true;
 
-        private Spans excludeSpans = exclude.getSpans(context, acceptDocs);
+        private Spans excludeSpans = exclude.getSpans(context, acceptDocs, termContexts);
         private boolean moreExclude = excludeSpans.next();
 
         @Override

Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanOrQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanOrQuery.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanOrQuery.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanOrQuery.java Mon Nov 14 22:36:20 2011
@@ -23,6 +23,7 @@ import java.util.List;
 import java.util.Collection;
 import java.util.ArrayList;
 import java.util.Iterator;
+import java.util.Map;
 import java.util.Set;
 
 import org.apache.lucene.index.IndexReader;
@@ -30,6 +31,7 @@ import org.apache.lucene.index.IndexRead
 import org.apache.lucene.index.Term;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.PriorityQueue;
+import org.apache.lucene.util.TermContext;
 import org.apache.lucene.util.ToStringUtils;
 import org.apache.lucene.search.Query;
 
@@ -164,9 +166,9 @@ public class SpanOrQuery extends SpanQue
   }
 
   @Override
-  public Spans getSpans(final AtomicReaderContext context, final Bits acceptDocs) throws IOException {
+  public Spans getSpans(final AtomicReaderContext context, final Bits acceptDocs, final Map<Term,TermContext> termContexts) throws IOException {
     if (clauses.size() == 1)                      // optimize 1-clause case
-      return (clauses.get(0)).getSpans(context, acceptDocs);
+      return (clauses.get(0)).getSpans(context, acceptDocs, termContexts);
 
     return new Spans() {
         private SpanQueue queue = null;
@@ -175,7 +177,7 @@ public class SpanOrQuery extends SpanQue
           queue = new SpanQueue(clauses.size());
           Iterator<SpanQuery> i = clauses.iterator();
           while (i.hasNext()) {
-            Spans spans = i.next().getSpans(context, acceptDocs);
+            Spans spans = i.next().getSpans(context, acceptDocs, termContexts);
             if (   ((target == -1) && spans.next())
                 || ((target != -1) && spans.skipTo(target))) {
               queue.add(spans);

Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanPositionCheckQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanPositionCheckQuery.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanPositionCheckQuery.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanPositionCheckQuery.java Mon Nov 14 22:36:20 2011
@@ -22,10 +22,12 @@ import org.apache.lucene.index.IndexRead
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.TermContext;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Map;
 import java.util.Set;
 
 
@@ -82,8 +84,8 @@ public abstract class SpanPositionCheckQ
   protected abstract AcceptStatus acceptPosition(Spans spans) throws IOException;
 
   @Override
-  public Spans getSpans(final AtomicReaderContext context, Bits acceptDocs) throws IOException {
-    return new PositionCheckSpan(context, acceptDocs);
+  public Spans getSpans(final AtomicReaderContext context, Bits acceptDocs, Map<Term,TermContext> termContexts) throws IOException {
+    return new PositionCheckSpan(context, acceptDocs, termContexts);
   }
 
 
@@ -107,8 +109,8 @@ public abstract class SpanPositionCheckQ
   protected class PositionCheckSpan extends Spans {
     private Spans spans;
 
-    public PositionCheckSpan(AtomicReaderContext context, Bits acceptDocs) throws IOException {
-      spans = match.getSpans(context, acceptDocs);
+    public PositionCheckSpan(AtomicReaderContext context, Bits acceptDocs, Map<Term,TermContext> termContexts) throws IOException {
+      spans = match.getSpans(context, acceptDocs, termContexts);
     }
 
     @Override

Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanQuery.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanQuery.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanQuery.java Mon Nov 14 22:36:20 2011
@@ -18,18 +18,21 @@ package org.apache.lucene.search.spans;
  */
 
 import java.io.IOException;
+import java.util.Map;
 
 import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.Term;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.TermContext;
 
 /** Base class for span-based queries. */
 public abstract class SpanQuery extends Query {
   /** Expert: Returns the matches for this query in an index.  Used internally
    * to search for spans. */
-  public abstract Spans getSpans(AtomicReaderContext context, Bits acceptDocs) throws IOException;
+  public abstract Spans getSpans(AtomicReaderContext context, Bits acceptDocs, Map<Term,TermContext> termContexts) throws IOException;
 
   /** Returns the name of the field matched by this query.*/
   public abstract String getField();
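
For reference, a minimal sketch of the new calling convention (not part of this
commit; it assumes a rewritten SpanQuery "query", an IndexSearcher "searcher"
and one leaf AtomicReaderContext "leaf"): the caller builds the Term->TermContext
map once, as SpanWeight now does below, and hands it to getSpans():

    Map<Term,TermContext> termContexts = new HashMap<Term,TermContext>();
    TreeSet<Term> terms = new TreeSet<Term>();
    query.extractTerms(terms);
    ReaderContext top = searcher.getTopReaderContext();
    for (Term term : terms) {
      termContexts.put(term, TermContext.build(top, term, true));
    }
    Spans spans = query.getSpans(leaf, leaf.reader.getLiveDocs(), termContexts);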

Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanTermQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanTermQuery.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanTermQuery.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanTermQuery.java Mon Nov 14 22:36:20 2011
@@ -19,12 +19,19 @@ package org.apache.lucene.search.spans;
 
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.TermState;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.ReaderUtil;
+import org.apache.lucene.util.TermContext;
 import org.apache.lucene.util.ToStringUtils;
 
 import java.io.IOException;
+import java.util.Map;
 import java.util.Set;
 
 /** Matches spans containing a term. */
@@ -82,22 +89,46 @@ public class SpanTermQuery extends SpanQ
   }
 
   @Override
-  public Spans getSpans(final AtomicReaderContext context, Bits acceptDocs) throws IOException {
-    final IndexReader reader = context.reader;
-    final DocsAndPositionsEnum postings = reader.termPositionsEnum(acceptDocs,
-                                                                   term.field(),
-                                                                   term.bytes());
+  public Spans getSpans(final AtomicReaderContext context, Bits acceptDocs, Map<Term,TermContext> termContexts) throws IOException {
+    TermContext termContext = termContexts.get(term);
+    final TermState state;
+    if (termContext == null) {
+      // this happens with span-not query, as it doesn't include the NOT side in extractTerms()
+      // so we seek to the term now in this segment... this sucks, mostly because it's ugly!
+      final Fields fields = context.reader.fields();
+      if (fields != null) {
+        final Terms terms = fields.terms(term.field());
+        if (terms != null) {
+          final TermsEnum termsEnum = terms.getThreadTermsEnum(); // thread-private don't share!
+          if (termsEnum.seekExact(term.bytes(), true)) { 
+            state = termsEnum.termState();
+          } else {
+            state = null;
+          }
+        } else {
+          state = null;
+        }
+      } else {
+        state = null;
+      }
+    } else {
+      state = termContext.get(context.ord);
+    }
+    
+    if (state == null) { // term is not present in that reader
+      return TermSpans.EMPTY_TERM_SPANS;
+    }
+    
+    final TermsEnum termsEnum = context.reader.terms(term.field()).getThreadTermsEnum();
+    termsEnum.seekExact(term.bytes(), state);
+    
+    final DocsAndPositionsEnum postings = termsEnum.docsAndPositions(acceptDocs, null);
 
     if (postings != null) {
       return new TermSpans(postings, term);
     } else {
-      if (reader.termDocsEnum(reader.getLiveDocs(), term.field(), term.bytes()) != null) {
-        // term does exist, but has no positions
-        throw new IllegalStateException("field \"" + term.field() + "\" was indexed without position data; cannot run SpanTermQuery (term=" + term.text() + ")");
-      } else {
-        // term does not exist
-        return TermSpans.EMPTY_TERM_SPANS;
-      }
+      // term does exist, but has no positions
+      throw new IllegalStateException("field \"" + term.field() + "\" was indexed without position data; cannot run SpanTermQuery (term=" + term.text() + ")");
     }
   }
 }
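
A rough sketch of the case the null-TermContext branch above handles (field and
terms are made up): SpanNotQuery's extractTerms() only contributes the include
side, so the exclude term is missing from termContexts and must be re-seeked
per segment:

    SpanQuery include = new SpanTermQuery(new Term("body", "keep"));
    SpanQuery exclude = new SpanTermQuery(new Term("body", "drop"));
    SpanQuery notQuery = new SpanNotQuery(include, exclude);
    TreeSet<Term> terms = new TreeSet<Term>();
    notQuery.extractTerms(terms);
    // terms now contains only body:keep; when the exclude clause's getSpans()
    // runs, termContexts.get(term) returns null and the seek above kicks in.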

Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanWeight.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanWeight.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanWeight.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/search/spans/SpanWeight.java Mon Nov 14 22:36:20 2011
@@ -27,7 +27,8 @@ import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.TermContext;
 
 import java.io.IOException;
-import java.util.Set;
+import java.util.HashMap;
+import java.util.Map;
 import java.util.TreeSet;
 
 /**
@@ -35,7 +36,7 @@ import java.util.TreeSet;
  */
 public class SpanWeight extends Weight {
   protected Similarity similarity;
-  protected Set<Term> terms;
+  protected Map<Term,TermContext> termContexts;
   protected SpanQuery query;
   protected Similarity.Stats stats;
 
@@ -44,15 +45,16 @@ public class SpanWeight extends Weight {
     this.similarity = searcher.getSimilarityProvider().get(query.getField());
     this.query = query;
     
-    terms=new TreeSet<Term>();
+    termContexts = new HashMap<Term,TermContext>();
+    TreeSet<Term> terms = new TreeSet<Term>();
     query.extractTerms(terms);
     final ReaderContext context = searcher.getTopReaderContext();
-    final TermContext states[] = new TermContext[terms.size()];
     final TermStatistics termStats[] = new TermStatistics[terms.size()];
     int i = 0;
     for (Term term : terms) {
-      states[i] = TermContext.build(context, term, true);
-      termStats[i] = searcher.termStatistics(term, states[i]);
+      TermContext state = TermContext.build(context, term, true);
+      termStats[i] = searcher.termStatistics(term, state);
+      termContexts.put(term, state);
       i++;
     }
     stats = similarity.computeStats(
@@ -77,7 +79,7 @@ public class SpanWeight extends Weight {
   @Override
   public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
       boolean topScorer, Bits acceptDocs) throws IOException {
-    return new SpanScorer(query.getSpans(context, acceptDocs), this, similarity.sloppyDocScorer(stats, query.getField(), context));
+    return new SpanScorer(query.getSpans(context, acceptDocs, termContexts), this, similarity.sloppyDocScorer(stats, query.getField(), context));
   }
 
   @Override

Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/store/FlushInfo.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/store/FlushInfo.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/store/FlushInfo.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/store/FlushInfo.java Mon Nov 14 22:36:20 2011
@@ -18,7 +18,7 @@ package org.apache.lucene.store;
  */
 
 /**
- * <p>A FlushInfo provides information required for a FLUSH context and other optimization operations.
+ * <p>A FlushInfo provides information required for a FLUSH context.
  *  It is used as part of an {@link IOContext} in case of FLUSH context.</p>
  */
 

Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/store/MergeInfo.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/store/MergeInfo.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/store/MergeInfo.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/store/MergeInfo.java Mon Nov 14 22:36:20 2011
@@ -17,7 +17,7 @@ package org.apache.lucene.store;
  */
 
 /**
- * <p>A MergeInfo provides information required for a MERGE context and other optimization operations.
+ * <p>A MergeInfo provides information required for a MERGE context.
  *  It is used as part of an {@link IOContext} in case of MERGE context.</p>
  */
 
@@ -29,7 +29,7 @@ public class MergeInfo {
   
   public final boolean isExternal;
   
-  public final boolean optimize;
+  public final int mergeMaxNumSegments;
   
 
   /**
@@ -40,11 +40,11 @@ public class MergeInfo {
    * 
    */
 
-  public MergeInfo(int totalDocCount, long estimatedMergeBytes, boolean isExternal, boolean optimize) {
+  public MergeInfo(int totalDocCount, long estimatedMergeBytes, boolean isExternal, int mergeMaxNumSegments) {
     this.totalDocCount = totalDocCount;
     this.estimatedMergeBytes = estimatedMergeBytes;
     this.isExternal = isExternal;
-    this.optimize = optimize;
+    this.mergeMaxNumSegments = mergeMaxNumSegments;
   }
 
 
@@ -55,7 +55,7 @@ public class MergeInfo {
     result = prime * result
         + (int) (estimatedMergeBytes ^ (estimatedMergeBytes >>> 32));
     result = prime * result + (isExternal ? 1231 : 1237);
-    result = prime * result + (optimize ? 1231 : 1237);
+    result = prime * result + mergeMaxNumSegments;
     result = prime * result + totalDocCount;
     return result;
   }
@@ -73,7 +73,7 @@ public class MergeInfo {
       return false;
     if (isExternal != other.isExternal)
       return false;
-    if (optimize != other.optimize)
+    if (mergeMaxNumSegments != other.mergeMaxNumSegments)
       return false;
     if (totalDocCount != other.totalDocCount)
       return false;
@@ -84,6 +84,6 @@ public class MergeInfo {
   public String toString() {
     return "MergeInfo [totalDocCount=" + totalDocCount
         + ", estimatedMergeBytes=" + estimatedMergeBytes + ", isExternal="
-        + isExternal + ", optimize=" + optimize + "]";
+        + isExternal + ", mergeMaxNumSegments=" + mergeMaxNumSegments + "]";
   }
-}
\ No newline at end of file
+}
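
A small sketch of the changed constructor (values are illustrative): the last
argument is now the segment count requested by a forced merge instead of a
boolean optimize flag:

    // before: new MergeInfo(totalDocCount, estimatedMergeBytes, isExternal, true)
    MergeInfo info = new MergeInfo(1000, 16 * 1024 * 1024, false, 1);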

Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/store/NIOFSDirectory.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/store/NIOFSDirectory.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/store/NIOFSDirectory.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/store/NIOFSDirectory.java Mon Nov 14 22:36:20 2011
@@ -18,6 +18,7 @@ package org.apache.lucene.store;
  */
 
 import java.io.File;
+import java.io.EOFException;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.channels.ClosedChannelException; // javadoc @link
@@ -181,7 +182,7 @@ public class NIOFSDirectory extends FSDi
       long pos = getFilePointer() + off;
       
       if (pos + len > end) {
-        throw new IOException("read past EOF: " + this);
+        throw new EOFException("read past EOF (resource: " + this + ")");
       }
 
       try {

Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/store/RAMInputStream.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/store/RAMInputStream.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/store/RAMInputStream.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/store/RAMInputStream.java Mon Nov 14 22:36:20 2011
@@ -91,7 +91,7 @@ public class RAMInputStream extends Inde
     if (currentBufferIndex >= file.numBuffers()) {
       // end of file reached, no more buffers left
       if (enforceEOF) {
-        throw new EOFException("Read past EOF: " + this);
+        throw new EOFException("Read past EOF (resource: " + this + ")");
       } else {
         // Force EOF if a read takes place at this position
         currentBufferIndex--;

Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/ArrayUtil.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/ArrayUtil.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/ArrayUtil.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/ArrayUtil.java Mon Nov 14 22:36:20 2011
@@ -509,6 +509,32 @@ public final class ArrayUtil {
     }
     return false;
   }
+  
+  // Arrays.equals has no variant that compares array slices at given offsets, so provide one here
+  /**
+   * See if two array slices are the same.
+   *
+   * @param left        The left array to compare
+   * @param offsetLeft  The offset into the left array.  Must be positive
+   * @param right       The right array to compare
+   * @param offsetRight The offset into the right array.  Must be positive
+   * @param length      The length of the section of the array to compare
+   * @return true if the two arrays, starting at their respective offsets, are equal
+   * 
+   * @see java.util.Arrays#equals(byte[], byte[])
+   */
+  public static boolean equals(byte[] left, int offsetLeft, byte[] right, int offsetRight, int length) {
+    if ((offsetLeft + length <= left.length) && (offsetRight + length <= right.length)) {
+      for (int i = 0; i < length; i++) {
+        if (left[offsetLeft + i] != right[offsetRight + i]) {
+          return false;
+        }
+
+      }
+      return true;
+    }
+    return false;
+  }
 
   /* DISABLE THIS FOR NOW: This has performance problems until Java creates intrinsics for Class#getComponentType() and Array.newInstance()
   public static <T> T[] grow(T[] array, int minSize) {
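
A quick usage example of the new slice comparison (buffers are made up):

    byte[] left  = new byte[] {9, 9, 1, 2, 3, 4};
    byte[] right = new byte[] {1, 2, 3, 4};
    boolean same    = ArrayUtil.equals(left, 2, right, 0, 4);  // true
    boolean tooLong = ArrayUtil.equals(left, 2, right, 0, 5);  // false: slice runs past both arrays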

Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/Constants.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/Constants.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/Constants.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/Constants.java Mon Nov 14 22:36:20 2011
@@ -28,12 +28,6 @@ public final class Constants {
 
   /** The value of <tt>System.getProperty("java.version")<tt>. **/
   public static final String JAVA_VERSION = System.getProperty("java.version");
-  /** True iff this is Java version 1.1. */
-  public static final boolean JAVA_1_1 = JAVA_VERSION.startsWith("1.1.");
-  /** True iff this is Java version 1.2. */
-  public static final boolean JAVA_1_2 = JAVA_VERSION.startsWith("1.2.");
-  /** True iff this is Java version 1.3. */
-  public static final boolean JAVA_1_3 = JAVA_VERSION.startsWith("1.3.");
  
   /** The value of <tt>System.getProperty("os.name")<tt>. **/
   public static final String OS_NAME = System.getProperty("os.name");
@@ -50,11 +44,17 @@ public final class Constants {
   public static final String OS_VERSION = System.getProperty("os.version");
   public static final String JAVA_VENDOR = System.getProperty("java.vendor");
 
-  // NOTE: this logic may not be correct; if you know of a
-  // more reliable approach please raise it on java-dev!
-  public static final boolean JRE_IS_64BIT;
+  /** @deprecated With Lucene 4.0, we are always on Java 6 */
+  @Deprecated
+  public static final boolean JRE_IS_MINIMUM_JAVA6 =
+    new Boolean(true).booleanValue(); // prevent inlining in foreign class files
+
+  public static final boolean JRE_IS_64BIT;  
+  public static final boolean JRE_IS_MINIMUM_JAVA7;
   static {
-    String x = System.getProperty("sun.arch.data.model");
+    // NOTE: this logic may not be correct; if you know of a
+    // more reliable approach please raise it on java-dev!
+    final String x = System.getProperty("sun.arch.data.model");
     if (x != null) {
       JRE_IS_64BIT = x.indexOf("64") != -1;
     } else {
@@ -64,6 +64,15 @@ public final class Constants {
         JRE_IS_64BIT = false;
       }
     }
+    
+    // this method only exists in Java 7:
+    boolean v7 = true;
+    try {
+      Throwable.class.getMethod("getSuppressed");
+    } catch (NoSuchMethodException nsme) {
+      v7 = false;
+    }
+    JRE_IS_MINIMUM_JAVA7 = v7;
   }
 
   // this method prevents inlining the final version constant in compiled classes,
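
A sketch of how callers can use the new constant (branch bodies are illustrative):

    if (Constants.JRE_IS_MINIMUM_JAVA7) {
      // rely on Java 7-only behavior, e.g. Throwable#getSuppressed
    } else {
      // fall back to the Java 6 code path
    }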

Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/Counter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/Counter.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/Counter.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/Counter.java Mon Nov 14 22:36:20 2011
@@ -77,7 +77,7 @@ public abstract class Counter {
   }
 
   private final static class AtomicCounter extends Counter {
-    private AtomicLong count;
+    private final AtomicLong count = new AtomicLong();
 
     @Override
     public long addAndGet(long delta) {

Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java Mon Nov 14 22:36:20 2011
@@ -119,6 +119,13 @@ public final class FieldCacheSanityCheck
       final CacheEntry item = cacheEntries[i];
       final Object val = item.getValue();
 
+      // It's OK to have dup entries, where one is eg
+      // float[] and the other is the Bits (from
+      // getDocWithField())
+      if (val instanceof Bits) {
+        continue;
+      }
+
       if (val instanceof FieldCache.CreationPlaceholder)
         continue;
 
@@ -208,7 +215,7 @@ public final class FieldCacheSanityCheck
       
       if (seen.contains(rf)) continue;
 
-      List<Object> kids = getAllDescendentReaderKeys(rf.readerKey);
+      List<Object> kids = getAllDescendantReaderKeys(rf.readerKey);
       for (Object kidKey : kids) {
         ReaderField kid = new ReaderField(kidKey, rf.fieldName);
         
@@ -266,7 +273,7 @@ public final class FieldCacheSanityCheck
    * the hierarchy of subReaders building up a list of the objects 
    * returned by obj.getFieldCacheKey()
    */
-  private List<Object> getAllDescendentReaderKeys(Object seed) {
+  private List<Object> getAllDescendantReaderKeys(Object seed) {
     List<Object> all = new ArrayList<Object>(17); // will grow as we iter
     all.add(seed);
     for (int i = 0; i < all.size(); i++) {

Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/IOUtils.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/IOUtils.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/IOUtils.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/IOUtils.java Mon Nov 14 22:36:20 2011
@@ -17,15 +17,35 @@ package org.apache.lucene.util;
  * limitations under the License.
  */
 
+import java.io.BufferedReader;
 import java.io.Closeable;
+import java.io.File;
+import java.io.FileInputStream;
 import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.Reader;
 import java.lang.reflect.Method;
+import java.nio.charset.Charset;
+import java.nio.charset.CharsetDecoder;
+import java.nio.charset.CodingErrorAction;
 
 /** This class emulates the new Java 7 "Try-With-Resources" statement.
  * Remove once Lucene is on Java 7.
  * @lucene.internal */
 public final class IOUtils {
-
+  
+  /**
+   * UTF-8 charset string
+   * @see Charset#forName(String)
+   */
+  public static final String UTF_8 = "UTF-8";
+  
+  /**
+   * UTF-8 {@link Charset} instance to prevent repeated
+   * {@link Charset#forName(String)} lookups
+   */
+  public static final Charset CHARSET_UTF_8 = Charset.forName("UTF-8");
   private IOUtils() {} // no instance
 
   /**
@@ -220,5 +240,84 @@ public final class IOUtils {
       }
     }
   }
+  
+  /**
+   * Wraps the given {@link InputStream} in a reader using a {@link CharsetDecoder}.
+   * Unlike Java's defaults, this reader will throw an exception if it detects
+   * that the read charset doesn't match the expected {@link Charset}.
+   * <p>
+   * Decoding readers are useful for loading configuration files, stopword lists or synonym files
+   * because they detect character set problems early. However, they are not recommended as
+   * general purpose readers.
+   * 
+   * @param stream the stream to wrap in a reader
+   * @param charSet the expected charset
+   * @return a wrapping reader
+   */
+  public static Reader getDecodingReader(InputStream stream, Charset charSet) {
+    final CharsetDecoder charSetDecoder = charSet.newDecoder()
+        .onMalformedInput(CodingErrorAction.REPORT)
+        .onUnmappableCharacter(CodingErrorAction.REPORT);
+    return new BufferedReader(new InputStreamReader(stream, charSetDecoder));
+  }
+  
+  /**
+   * Opens a Reader for the given {@link File} using a {@link CharsetDecoder}.
+   * Unlike Java's defaults, this reader will throw an exception if it detects
+   * that the read charset doesn't match the expected {@link Charset}.
+   * <p>
+   * Decoding readers are useful for loading configuration files, stopword lists or synonym files
+   * because they detect character set problems early. However, they are not recommended as
+   * general purpose readers.
+   * @param file the file to open a reader on
+   * @param charSet the expected charset
+   * @return a reader to read the given file
+   */
+  public static Reader getDecodingReader(File file, Charset charSet) throws IOException {
+    FileInputStream stream = null;
+    boolean success = false;
+    try {
+      stream = new FileInputStream(file);
+      final Reader reader = getDecodingReader(stream, charSet);
+      success = true;
+      return reader;
+
+    } finally {
+      if (!success) {
+        IOUtils.close(stream);
+      }
+    }
+  }
+
+  /**
+   * Opens a Reader for the given resource using a {@link CharsetDecoder}.
+   * Unlike Java's defaults, this reader will throw an exception if it detects
+   * that the read charset doesn't match the expected {@link Charset}.
+   * <p>
+   * Decoding readers are useful for loading configuration files, stopword lists or synonym files
+   * because they detect character set problems early. However, they are not recommended as
+   * general purpose readers.
+   * @param clazz the class used to locate the resource
+   * @param resource the resource name to load
+   * @param charSet the expected charset
+   * @return a reader to read the given file
+   * 
+   */
+  public static Reader getDecodingReader(Class<?> clazz, String resource, Charset charSet) throws IOException {
+    InputStream stream = null;
+    boolean success = false;
+    try {
+      stream = clazz.getResourceAsStream(resource);
+      final Reader reader = getDecodingReader(stream, charSet);
+      success = true;
+      return reader;
+    } finally {
+      if (!success) {
+        IOUtils.close(stream);
+      }
+    }
+  }
+
 
 }
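
A minimal usage sketch (the file name is made up): load a stopword list with
strict UTF-8 decoding so that malformed bytes fail loudly instead of being
silently replaced:

    Reader reader = IOUtils.getDecodingReader(new File("stopwords.txt"), IOUtils.CHARSET_UTF_8);
    try {
      char[] buffer = new char[1024];
      int read;
      while ((read = reader.read(buffer)) != -1) {
        // consume buffer[0..read); a CharacterCodingException surfaces here on bad bytes
      }
    } finally {
      reader.close();
    }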

Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/packed/Packed64.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/packed/Packed64.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/packed/Packed64.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/packed/Packed64.java Mon Nov 14 22:36:20 2011
@@ -38,7 +38,7 @@ class Packed64 extends PackedInts.Reader
   static final int MOD_MASK = BLOCK_SIZE - 1; // x % BLOCK_SIZE
 
   private static final int ENTRY_SIZE = BLOCK_SIZE + 1;
-  private static final int FAC_BITPOS = 3;
+  static final int FAC_BITPOS = 3;
 
   /*
    * In order to make an efficient value-getter, conditionals should be
@@ -50,10 +50,9 @@ class Packed64 extends PackedInts.Reader
    * the right bits. By always shifting the second block right and applying
    * a mask, we get the right bits there. After that, we | the two bitsets.
   */
-  private static final int[][] SHIFTS =
-          new int[ENTRY_SIZE][ENTRY_SIZE * FAC_BITPOS];
-          //new int[BLOCK_SIZE+1][BLOCK_SIZE][BLOCK_SIZE+1];
-  private static final long[][] MASKS = new long[ENTRY_SIZE][ENTRY_SIZE];
+  static final int[][] SHIFTS =
+    new int[ENTRY_SIZE][ENTRY_SIZE * FAC_BITPOS];
+  static final long[][] MASKS = new long[ENTRY_SIZE][ENTRY_SIZE];
 
   static { // Generate shifts
       for (int elementBits = 1 ; elementBits <= BLOCK_SIZE ; elementBits++) {

Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/packed/PackedInts.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/packed/PackedInts.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/packed/PackedInts.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/packed/PackedInts.java Mon Nov 14 22:36:20 2011
@@ -104,14 +104,6 @@ public class PackedInts {
     long advance(int ord) throws IOException;
   }
   
-  public static interface RandomAccessReaderIterator extends ReaderIterator {
-    /**
-     * @param index the position of the wanted value.
-     * @return the value at the stated index.
-     */
-    long get(int index) throws IOException;
-  }
-  
   /**
    * A packed integer array that can be modified.
    * @lucene.internal
@@ -230,22 +222,28 @@ public class PackedInts {
    * @lucene.internal
    */
   public static ReaderIterator getReaderIterator(IndexInput in) throws IOException {
-    return getRandomAccessReaderIterator(in);
+    CodecUtil.checkHeader(in, CODEC_NAME, VERSION_START, VERSION_START);
+    final int bitsPerValue = in.readVInt();
+    assert bitsPerValue > 0 && bitsPerValue <= 64: "bitsPerValue=" + bitsPerValue;
+    final int valueCount = in.readVInt();
+    return new PackedReaderIterator(bitsPerValue, valueCount, in);
   }
   
   /**
-   * Retrieve PackedInts as a {@link RandomAccessReaderIterator}
+   * Retrieves a PackedInts.Reader that does not load values
+   * into RAM but rather accesses all values via the
+   * provided IndexInput.
    * @param in positioned at the beginning of a stored packed int structure.
-   * @return an iterator to access the values
+   * @return a Reader to access the values
    * @throws IOException if the structure could not be retrieved.
    * @lucene.internal
    */
-  public static RandomAccessReaderIterator getRandomAccessReaderIterator(IndexInput in) throws IOException {
+  public static Reader getDirectReader(IndexInput in) throws IOException {
     CodecUtil.checkHeader(in, CODEC_NAME, VERSION_START, VERSION_START);
     final int bitsPerValue = in.readVInt();
     assert bitsPerValue > 0 && bitsPerValue <= 64: "bitsPerValue=" + bitsPerValue;
     final int valueCount = in.readVInt();
-    return new PackedReaderIterator(bitsPerValue, valueCount, in);
+    return new DirectReader(bitsPerValue, valueCount, in);
   }
   
   /**

Modified: lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/packed/PackedReaderIterator.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/packed/PackedReaderIterator.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/packed/PackedReaderIterator.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/java/org/apache/lucene/util/packed/PackedReaderIterator.java Mon Nov 14 22:36:20 2011
@@ -21,15 +21,13 @@ import org.apache.lucene.store.IndexInpu
 
 import java.io.IOException;
 
-final class PackedReaderIterator implements PackedInts.RandomAccessReaderIterator {
+final class PackedReaderIterator implements PackedInts.ReaderIterator {
   private long pending;
   private int pendingBitsLeft;
   private final IndexInput in;
   private final int bitsPerValue;
   private final int valueCount;
   private int position = -1;
-  private long currentValue;
-  private final long startPointer;
 
   // masks[n-1] masks for bottom n bits
   private final long[] masks;
@@ -41,7 +39,6 @@ final class PackedReaderIterator impleme
     this.bitsPerValue = bitsPerValue;
     
     this.in = in;
-    startPointer = in.getFilePointer();
     masks = new long[bitsPerValue];
 
     long v = 1;
@@ -79,7 +76,7 @@ final class PackedReaderIterator impleme
     }
     
     ++position;
-    return currentValue = result;
+    return result;
   }
 
   public void close() throws IOException {
@@ -109,26 +106,6 @@ final class PackedReaderIterator impleme
       pendingBitsLeft = 64 - (int)(skip % 64);
     }
     position = ord-1;
-    return currentValue = next();
-  }
-  
-
-  @Override
-  public long get(int index) throws IOException {
-    assert index < valueCount : "ord must be less than valueCount";
-    if (index < position) {
-      pendingBitsLeft = 0;
-      final long bitsToSkip = (((long) bitsPerValue) * (long) index);
-      final long skip = bitsToSkip - pendingBitsLeft;
-      final long closestByte = (skip >> 6) << 3;
-      in.seek(startPointer + closestByte);
-      pending = in.readLong();
-      pendingBitsLeft = 64 - (int) (skip % 64);
-      position = index - 1;
-      return currentValue = next();
-    } else if (index == position) {
-      return currentValue;
-    }
-    return advance(index);
+    return next();
   }
 }
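
A sketch of the two remaining access styles after the PackedInts change above
(assumes an IndexInput positioned at a packed ints header in each case):

    // sequential access: streams values from the file
    PackedInts.ReaderIterator it = PackedInts.getReaderIterator(in);
    long first = it.next();

    // random access without loading values into RAM, backed by the IndexInput
    PackedInts.Reader direct = PackedInts.getDirectReader(in2);
    long tenth = direct.get(9);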

Modified: lucene/dev/branches/solrcloud/lucene/src/resources/META-INF/services/org.apache.lucene.index.codecs.Codec
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/resources/META-INF/services/org.apache.lucene.index.codecs.Codec?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/resources/META-INF/services/org.apache.lucene.index.codecs.Codec (original)
+++ lucene/dev/branches/solrcloud/lucene/src/resources/META-INF/services/org.apache.lucene.index.codecs.Codec Mon Nov 14 22:36:20 2011
@@ -15,3 +15,4 @@
 
 org.apache.lucene.index.codecs.lucene40.Lucene40Codec
 org.apache.lucene.index.codecs.lucene3x.Lucene3xCodec
+org.apache.lucene.index.codecs.simpletext.SimpleTextCodec

Modified: lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/analysis/CollationTestBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/analysis/CollationTestBase.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/analysis/CollationTestBase.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/analysis/CollationTestBase.java Mon Nov 14 22:36:20 2011
@@ -216,7 +216,7 @@ public abstract class CollationTestBase 
         doc.add(new TextField("Denmark", denmarkAnalyzer.tokenStream("Denmark", new StringReader(sortData[i][5]))));
       writer.addDocument(doc);
     }
-    writer.optimize();
+    writer.forceMerge(1);
     writer.close();
     IndexSearcher searcher = new IndexSearcher(indexStore, true);
 

Modified: lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/index/MockRandomMergePolicy.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/index/MockRandomMergePolicy.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/index/MockRandomMergePolicy.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/index/MockRandomMergePolicy.java Mon Nov 14 22:36:20 2011
@@ -55,18 +55,18 @@ public class MockRandomMergePolicy exten
   }
 
   @Override
-  public MergeSpecification findMergesForOptimize(
-       SegmentInfos segmentInfos, int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToOptimize)
+  public MergeSpecification findForcedMerges(
+       SegmentInfos segmentInfos, int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToMerge)
     throws CorruptIndexException, IOException {
 
     final List<SegmentInfo> eligibleSegments = new ArrayList<SegmentInfo>();
     for(SegmentInfo info : segmentInfos) {
-      if (segmentsToOptimize.containsKey(info)) {
+      if (segmentsToMerge.containsKey(info)) {
         eligibleSegments.add(info);
       }
     }
 
-    //System.out.println("MRMP: findMergesForOptimize sis=" + segmentInfos + " eligible=" + eligibleSegments);
+    //System.out.println("MRMP: findMerges sis=" + segmentInfos + " eligible=" + eligibleSegments);
     MergeSpecification mergeSpec = null;
     if (eligibleSegments.size() > 1 || (eligibleSegments.size() == 1 && eligibleSegments.get(0).hasDeletions())) {
       mergeSpec = new MergeSpecification();
@@ -85,7 +85,7 @@ public class MockRandomMergePolicy exten
     if (mergeSpec != null) {
       for(OneMerge merge : mergeSpec.merges) {
         for(SegmentInfo info : merge.segments) {
-          assert segmentsToOptimize.containsKey(info);
+          assert segmentsToMerge.containsKey(info);
         }
       }
     }

Modified: lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/index/RandomCodec.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/index/RandomCodec.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/index/RandomCodec.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/index/RandomCodec.java Mon Nov 14 22:36:20 2011
@@ -52,7 +52,10 @@ public class RandomCodec extends Lucene4
   /** shuffled list of postingsformats to use for new mappings */
   private List<PostingsFormat> formats = new ArrayList<PostingsFormat>();
   /** memorized field->postingsformat mappings */
-  private Map<String,PostingsFormat> previousMappings = new HashMap<String,PostingsFormat>();
+  // note: we have to sync this map even though it's just for debugging/toString,
+  // otherwise DWPT's .toString() calls that iterate over the map can
+  // cause ConcurrentModificationException if IndexWriter's infoStream is on
+  private Map<String,PostingsFormat> previousMappings = Collections.synchronizedMap(new HashMap<String,PostingsFormat>());
   private final int perFieldSeed;
 
   @Override

Modified: lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/index/RandomIndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/index/RandomIndexWriter.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/index/RandomIndexWriter.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/index/RandomIndexWriter.java Mon Nov 14 22:36:20 2011
@@ -38,7 +38,7 @@ import org.apache.lucene.util._TestUtil;
 
 /** Silly class that randomizes the indexing experience.  EG
  *  it may swap in a different merge policy/scheduler; may
- *  commit periodically; may or may not optimize in the end,
+ *  commit periodically; may or may not forceMerge in the end,
  *  may flush by doc count instead of RAM, etc. 
  */
 
@@ -100,7 +100,6 @@ public class RandomIndexWriter implement
     if (LuceneTestCase.VERBOSE) {
       System.out.println("RIW config=" + w.getConfig());
       System.out.println("codec default=" + codec.getName());
-      w.setInfoStream(System.out);
     }
     /* TODO: find some way to make that random...
      * This must be fixed across all fixed bytes 
@@ -324,8 +323,8 @@ public class RandomIndexWriter implement
     return getReader(true);
   }
 
-  private boolean doRandomOptimize = true;
-  private boolean doRandomOptimizeAssert = true;
+  private boolean doRandomForceMerge = true;
+  private boolean doRandomForceMergeAssert = true;
 
   public void expungeDeletes(boolean doWait) throws IOException {
     w.expungeDeletes(doWait);
@@ -335,25 +334,25 @@ public class RandomIndexWriter implement
     w.expungeDeletes();
   }
 
-  public void setDoRandomOptimize(boolean v) {
-    doRandomOptimize = v;
+  public void setDoRandomForceMerge(boolean v) {
+    doRandomForceMerge = v;
   }
 
-  public void setDoRandomOptimizeAssert(boolean v) {
-    doRandomOptimizeAssert = v;
+  public void setDoRandomForceMergeAssert(boolean v) {
+    doRandomForceMergeAssert = v;
   }
 
-  private void doRandomOptimize() throws IOException {
-    if (doRandomOptimize) {
+  private void doRandomForceMerge() throws IOException {
+    if (doRandomForceMerge) {
       final int segCount = w.getSegmentCount();
       if (r.nextBoolean() || segCount == 0) {
-        // full optimize
-        w.optimize();
+        // full forceMerge
+        w.forceMerge(1);
       } else {
-        // partial optimize
+        // partial forceMerge
         final int limit = _TestUtil.nextInt(r, 1, segCount);
-        w.optimize(limit);
-        assert !doRandomOptimizeAssert || w.getSegmentCount() <= limit: "limit=" + limit + " actual=" + w.getSegmentCount();
+        w.forceMerge(limit);
+        assert !doRandomForceMergeAssert || w.getSegmentCount() <= limit: "limit=" + limit + " actual=" + w.getSegmentCount();
       }
     }
     switchDoDocValues();
@@ -362,7 +361,7 @@ public class RandomIndexWriter implement
   public IndexReader getReader(boolean applyDeletions) throws IOException {
     getReaderCalled = true;
     if (r.nextInt(4) == 2) {
-      doRandomOptimize();
+      doRandomForceMerge();
     }
     // If we are writing with PreFlexRW, force a full
     // IndexReader.open so terms are sorted in codepoint
@@ -395,21 +394,21 @@ public class RandomIndexWriter implement
    */
   public void close() throws IOException {
     // if someone isn't using getReader() API, we want to be sure to
-    // maybeOptimize since presumably they might open a reader on the dir.
+    // forceMerge since presumably they might open a reader on the dir.
     if (getReaderCalled == false && r.nextInt(8) == 2) {
-      doRandomOptimize();
+      doRandomForceMerge();
     }
     w.close();
   }
 
   /**
-   * Forces an optimize.
+   * Forces a forceMerge.
    * <p>
    * NOTE: this should be avoided in tests unless absolutely necessary,
    * as it will result in less test coverage.
-   * @see IndexWriter#optimize()
+   * @see IndexWriter#forceMerge(int)
    */
-  public void optimize() throws IOException {
-    w.optimize();
+  public void forceMerge(int maxSegmentCount) throws IOException {
+    w.forceMerge(maxSegmentCount);
   }
 }
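
A short sketch of the renamed test API (assumes a Random "random", a Directory
"dir" and a Document "doc"):

    RandomIndexWriter w = new RandomIndexWriter(random, dir);
    w.addDocument(doc);
    w.forceMerge(1);          // tests that previously called w.optimize()
    IndexReader r = w.getReader();
    r.close();
    w.close();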

Modified: lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java Mon Nov 14 22:36:20 2011
@@ -47,13 +47,14 @@ import org.apache.lucene.store.Directory
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.FailOnNonBulkMergesInfoStream;
 import org.apache.lucene.util.LineFileDocs;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.NamedThreadFactory;
 import org.apache.lucene.util._TestUtil;
 
 // TODO
-//   - mix in optimize, addIndexes
+//   - mix in forceMerge, addIndexes
 //   - randomly mix in non-congruent docs
 
 /** Utility class that spawns multiple indexing and
@@ -435,7 +436,8 @@ public abstract class ThreadedIndexingAn
     final File tempDir = _TestUtil.getTempDir(testName);
     dir = newFSDirectory(tempDir);
     ((MockDirectoryWrapper) dir).setCheckIndexOnClose(false); // don't double-checkIndex, we do it ourselves.
-    final IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+    final IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+        setInfoStream(new FailOnNonBulkMergesInfoStream());
 
     if (LuceneTestCase.TEST_NIGHTLY) {
       // newIWConfig makes smallish max seg size, which
@@ -479,9 +481,6 @@ public abstract class ThreadedIndexingAn
       });
 
     writer = new IndexWriter(dir, conf);
-    if (VERBOSE) {
-      writer.setInfoStream(System.out);
-    }
     _TestUtil.reduceOpenFiles(writer);
 
     final ExecutorService es = random.nextBoolean() ? null : Executors.newCachedThreadPool(new NamedThreadFactory(testName));
@@ -616,7 +615,6 @@ public abstract class ThreadedIndexingAn
 
     assertEquals("index=" + writer.segString() + " addCount=" + addCount + " delCount=" + delCount, addCount.get() - delCount.get(), writer.numDocs());
 
-    assertFalse(writer.anyNonBulkMerges);
     doClose();
     writer.close(false);
 

Modified: lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/util/LuceneTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/util/LuceneTestCase.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/util/LuceneTestCase.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/util/LuceneTestCase.java Mon Nov 14 22:36:20 2011
@@ -32,7 +32,6 @@ import java.util.Map.Entry;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
-import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import org.apache.lucene.analysis.Analyzer;
@@ -41,17 +40,9 @@ import org.apache.lucene.document.FieldT
 import org.apache.lucene.index.*;
 import org.apache.lucene.index.codecs.Codec;
 import org.apache.lucene.index.codecs.PostingsFormat;
-import org.apache.lucene.index.codecs.lucene3x.Lucene3xCodec;
-import org.apache.lucene.index.codecs.lucene3x.Lucene3xPostingsFormat;
 import org.apache.lucene.index.codecs.lucene40.Lucene40Codec;
-import org.apache.lucene.index.codecs.mockintblock.MockFixedIntBlockPostingsFormat;
-import org.apache.lucene.index.codecs.mockintblock.MockVariableIntBlockPostingsFormat;
-import org.apache.lucene.index.codecs.mocksep.MockSepPostingsFormat;
-import org.apache.lucene.index.codecs.mockrandom.MockRandomPostingsFormat;
-import org.apache.lucene.index.codecs.perfield.PerFieldPostingsFormat;
 import org.apache.lucene.index.codecs.preflexrw.PreFlexRWCodec;
-import org.apache.lucene.index.codecs.preflexrw.PreFlexRWPostingsFormat;
-import org.apache.lucene.index.codecs.pulsing.PulsingPostingsFormat;
+import org.apache.lucene.index.codecs.simpletext.SimpleTextCodec;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.FieldCache.CacheEntry;
@@ -110,6 +101,8 @@ public abstract class LuceneTestCase ext
    * expected to print any messages.
    */
   public static final boolean VERBOSE = Boolean.getBoolean("tests.verbose");
+  
+  public static final boolean INFOSTREAM = Boolean.parseBoolean(System.getProperty("tests.infostream", Boolean.toString(VERBOSE)));
 
   /** Use this constant when creating Analyzers and any other version-dependent stuff.
    * <p><b>NOTE:</b> Change this when development starts for new Lucene version:
@@ -139,6 +132,8 @@ public abstract class LuceneTestCase ext
   // by default we randomly pick a different codec for
   // each test case (non-J4 tests) and each test class (J4
   // tests)
+  /** Gets the codec to run tests with. */
+  public static final String TEST_CODEC = System.getProperty("tests.codec", "random");
   /** Gets the postingsFormat to run tests with. */
   public static final String TEST_POSTINGSFORMAT = System.getProperty("tests.postingsformat", "random");
   /** Gets the locale to run tests with */
@@ -216,6 +211,8 @@ public abstract class LuceneTestCase ext
   // default codec
   private static Codec savedCodec;
   
+  private static InfoStream savedInfoStream;
+
   private static SimilarityProvider similarityProvider;
 
   private static Locale locale;
@@ -265,13 +262,30 @@ public abstract class LuceneTestCase ext
         System.out.println("Loaded postingsFormat: '" + postingsFormat + "': " + PostingsFormat.forName(postingsFormat).getClass().getName());
       }
     }
+    
+    savedInfoStream = InfoStream.getDefault();
+    if (INFOSTREAM) {
+      // consume random for consistency
+      random.nextBoolean();
+      InfoStream.setDefault(new PrintStreamInfoStream(System.out));
+    } else {
+      if (random.nextBoolean()) {
+        InfoStream.setDefault(new NullInfoStream());
+      }
+    }
 
     PREFLEX_IMPERSONATION_IS_ACTIVE = false;
     savedCodec = Codec.getDefault();
     final Codec codec;
-    if ("Lucene3x".equals(TEST_POSTINGSFORMAT) || ("random".equals(TEST_POSTINGSFORMAT) && random.nextInt(4) == 0)) { // preflex-only setup
+    int randomVal = random.nextInt(10);
+    
+    if ("Lucene3x".equals(TEST_CODEC) || ("random".equals(TEST_CODEC) && randomVal < 2)) { // preflex-only setup
       codec = new PreFlexRWCodec();
       PREFLEX_IMPERSONATION_IS_ACTIVE = true;
+    } else if ("SimpleText".equals(TEST_CODEC) || ("random".equals(TEST_CODEC) && randomVal == 9)) {
+      codec = new SimpleTextCodec();
+    } else if (!"random".equals(TEST_CODEC)) {
+      codec = Codec.forName(TEST_CODEC);
     } else if ("random".equals(TEST_POSTINGSFORMAT)) {
       codec = new RandomCodec(random, useNoMemoryExpensiveCodec);
     } else {
@@ -342,6 +356,7 @@ public abstract class LuceneTestCase ext
     
     String codecDescription = Codec.getDefault().toString();
     Codec.setDefault(savedCodec);
+    InfoStream.setDefault(savedInfoStream);
     Locale.setDefault(savedLocale);
     TimeZone.setDefault(savedTimeZone);
     System.clearProperty("solr.solr.home");
@@ -1154,8 +1169,13 @@ public abstract class LuceneTestCase ext
     return d;
   }
 
-  /** Registers a temp file that will be deleted when tests are done. */
-  public static void registerTempFile(File tmpFile) {
+  /**
+   * Registers a temp directory that will be deleted when tests are done. This
+   * is used by {@link _TestUtil#getTempDir(String)} and
+   * {@link _TestUtil#unzip(File, File)}, so you should call these methods when
+   * possible.
+   */
+  static void registerTempDir(File tmpFile) {
     tempDirs.put(tmpFile.getAbsoluteFile(), Thread.currentThread().getStackTrace());
   }
   
@@ -1168,11 +1188,9 @@ public abstract class LuceneTestCase ext
       final Class<? extends Directory> clazz = Class.forName(clazzName).asSubclass(Directory.class);
       // If it is a FSDirectory type, try its ctor(File)
       if (FSDirectory.class.isAssignableFrom(clazz)) {
-        final File tmpFile = _TestUtil.createTempFile("test", "tmp", TEMP_DIR);
-        tmpFile.delete();
-        tmpFile.mkdir();
-        registerTempFile(tmpFile);
-        return newFSDirectoryImpl(clazz.asSubclass(FSDirectory.class), tmpFile);
+        final File dir = _TestUtil.getTempDir("index");
+        dir.mkdirs(); // ensure it's created so we 'have' it.
+        return newFSDirectoryImpl(clazz.asSubclass(FSDirectory.class), dir);
       }
 
       // try empty ctor
@@ -1273,12 +1291,16 @@ public abstract class LuceneTestCase ext
   // extra params that were overridden needed to reproduce the command
   private static String reproduceWithExtraParams() {
     StringBuilder sb = new StringBuilder();
+    if (!TEST_CODEC.equals("random")) sb.append(" -Dtests.codec=").append(TEST_CODEC);
     if (!TEST_POSTINGSFORMAT.equals("random")) sb.append(" -Dtests.postingsformat=").append(TEST_POSTINGSFORMAT);
     if (!TEST_LOCALE.equals("random")) sb.append(" -Dtests.locale=").append(TEST_LOCALE);
     if (!TEST_TIMEZONE.equals("random")) sb.append(" -Dtests.timezone=").append(TEST_TIMEZONE);
     if (!TEST_DIRECTORY.equals("random")) sb.append(" -Dtests.directory=").append(TEST_DIRECTORY);
     if (RANDOM_MULTIPLIER > 1) sb.append(" -Dtests.multiplier=").append(RANDOM_MULTIPLIER);
     if (TEST_NIGHTLY) sb.append(" -Dtests.nightly=true");
+    // TODO we can't randomize this yet (it drives ant crazy) but this makes tests reproducible
+    // in case machines have different default charsets...
+    sb.append(" -Dargs=\"-Dfile.encoding=" + System.getProperty("file.encoding") + "\"");
     return sb.toString();
   }
 
@@ -1297,7 +1319,7 @@ public abstract class LuceneTestCase ext
       context = IOContext.READONCE;
       break;
     case 3:
-      context = new IOContext(new MergeInfo(randomNumDocs, size, true, false));
+      context = new IOContext(new MergeInfo(randomNumDocs, size, true, -1));
       break;
     case 4:
       context = new IOContext(new FlushInfo(randomNumDocs, size));

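For illustration only (not part of this patch): the new tests.codec property now sits in front of the older tests.postingsformat randomization. A condensed sketch of the fall-through added above, with the random branches collapsed for readability (all class names as in the hunk):

    // sketch: explicit codec names win, "random" keeps the old behaviour
    final Codec codec;
    if ("Lucene3x".equals(TEST_CODEC)) {
      codec = new PreFlexRWCodec();            // preflex-only setup
    } else if ("SimpleText".equals(TEST_CODEC)) {
      codec = new SimpleTextCodec();
    } else if (!"random".equals(TEST_CODEC)) {
      codec = Codec.forName(TEST_CODEC);       // any other registered codec
    } else {
      codec = new RandomCodec(random, useNoMemoryExpensiveCodec);
    }

The reproduce line printed on failure now also records -Dtests.codec and the JVM's file.encoding, so a failing run can be repeated with the same codec and charset.
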
Modified: lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/util/_TestUtil.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/util/_TestUtil.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/util/_TestUtil.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test-framework/java/org/apache/lucene/util/_TestUtil.java Mon Nov 14 22:36:20 2011
@@ -62,7 +62,7 @@ public class _TestUtil {
     try {
       File f = createTempFile(desc, "tmp", LuceneTestCase.TEMP_DIR);
       f.delete();
-      LuceneTestCase.registerTempFile(f);
+      LuceneTestCase.registerTempDir(f);
       return f;
     } catch (IOException e) {
       throw new RuntimeException(e);
@@ -74,6 +74,9 @@ public class _TestUtil {
    */
   public static void rmDir(File dir) throws IOException {
     if (dir.exists()) {
+      if (dir.isFile() && !dir.delete()) {
+        throw new IOException("could not delete " + dir);
+      }
       for (File f : dir.listFiles()) {
         if (f.isDirectory()) {
           rmDir(f);
@@ -101,7 +104,7 @@ public class _TestUtil {
     rmDir(destDir);
     
     destDir.mkdir();
-    LuceneTestCase.registerTempFile(destDir);
+    LuceneTestCase.registerTempDir(destDir);
     
     while (entries.hasMoreElements()) {
       ZipEntry entry = entries.nextElement();
@@ -155,6 +158,9 @@ public class _TestUtil {
       System.out.println(bos.toString());
       throw new RuntimeException("CheckIndex failed");
     } else {
+      if (LuceneTestCase.INFOSTREAM) {
+        System.out.println(bos.toString());
+      }
       return indexStatus;
     }
   }

Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/TestDemo.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/TestDemo.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/TestDemo.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/TestDemo.java Mon Nov 14 22:36:20 2011
@@ -45,7 +45,6 @@ public class TestDemo extends LuceneTest
     // To store an index on disk, use this instead:
     //Directory directory = FSDirectory.open("/tmp/testindex");
     RandomIndexWriter iwriter = new RandomIndexWriter(random, directory, analyzer);
-    iwriter.w.setInfoStream(VERBOSE ? System.out : null);
     Document doc = new Document();
     String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm";
     String text = "This is the text to be indexed. " + longTerm;

Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/TestExternalCodecs.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/TestExternalCodecs.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/TestExternalCodecs.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/TestExternalCodecs.java Mon Nov 14 22:36:20 2011
@@ -25,11 +25,9 @@ import org.apache.lucene.document.*;
 import org.apache.lucene.index.*;
 import org.apache.lucene.index.codecs.*;
 import org.apache.lucene.index.codecs.lucene40.Lucene40Codec;
-import org.apache.lucene.index.codecs.perfield.PerFieldPostingsFormat;
 import org.apache.lucene.search.*;
 import org.apache.lucene.store.*;
 import org.apache.lucene.util.*;
-import org.apache.lucene.util.Bits;
 
 /* Intentionally outside of oal.index to verify fully
    external codecs work fine */
@@ -68,7 +66,6 @@ public class TestExternalCodecs extends 
         setCodec(new CustomPerFieldCodec()).
             setMergePolicy(newLogMergePolicy(3))
     );
-    w.setInfoStream(VERBOSE ? System.out : null);
     Document doc = new Document();
     // uses default codec:
     doc.add(newField("field1", "this field uses the standard codec as the test", TextField.TYPE_UNSTORED));
@@ -105,7 +102,7 @@ public class TestExternalCodecs extends 
       System.out.println("\nTEST: now delete 2nd doc");
     }
     w.deleteDocuments(new Term("id", "44"));
-    w.optimize();
+    w.forceMerge(1);
     r = IndexReader.open(w, true);
     assertEquals(NUM_DOCS-2, r.maxDoc());
     assertEquals(NUM_DOCS-2, r.numDocs());

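For illustration only (not part of this patch): across these tests, optimize() is replaced by an explicit request for a single segment. A minimal sketch mirroring the hunk above:

    // sketch: collapse the index down to one segment after the delete,
    // then reopen a near-real-time reader to check the doc counts
    w.deleteDocuments(new Term("id", "44"));
    w.forceMerge(1);                 // was: w.optimize()
    IndexReader r = IndexReader.open(w, true);
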
Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java Mon Nov 14 22:36:20 2011
@@ -142,7 +142,7 @@ public class TestMergeSchedulerExternal 
     writer.commit(); // trigger flush
     writer.addDocument(new Document());
     writer.commit(); // trigger flush
-    writer.optimize();
+    writer.forceMerge(1);
     writer.close();
     dir.close();
   }

Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java Mon Nov 14 22:36:20 2011
@@ -87,7 +87,6 @@ public class TestSearchForDuplicates ext
       IndexWriter writer = new IndexWriter(directory, conf);
       if (VERBOSE) {
         System.out.println("TEST: now build index MAX_DOCS=" + MAX_DOCS);
-        writer.setInfoStream(System.out);
       }
 
       for (int j = 0; j < MAX_DOCS; j++) {

Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/Test2BPostings.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/Test2BPostings.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/Test2BPostings.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/Test2BPostings.java Mon Nov 14 22:36:20 2011
@@ -73,7 +73,7 @@ public class Test2BPostings extends Luce
         System.out.println(i + " of " + numDocs + "...");
       }
     }
-    w.optimize();
+    w.forceMerge(1);
     w.close();
     CheckIndex ci = new CheckIndex(dir);
     if (VERBOSE) {

Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/Test2BTerms.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/Test2BTerms.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/Test2BTerms.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/Test2BTerms.java Mon Nov 14 22:36:20 2011
@@ -168,7 +168,6 @@ public class Test2BTerms extends LuceneT
                                       .setMergePolicy(newLogMergePolicy(false, 10))
                                       .setOpenMode(IndexWriterConfig.OpenMode.CREATE));
 
-      w.setInfoStream(VERBOSE ? System.out : null);
       MergePolicy mp = w.getConfig().getMergePolicy();
       if (mp instanceof LogByteSizeMergePolicy) {
         // 1 petabyte:
@@ -196,8 +195,8 @@ public class Test2BTerms extends LuceneT
       }
       savedTerms = ts.savedTerms;
 
-      System.out.println("TEST: optimize");
-      w.optimize();
+      System.out.println("TEST: full merge");
+      w.forceMerge(1);
       System.out.println("TEST: close writer");
       w.close();
     }

Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java Mon Nov 14 22:36:20 2011
@@ -32,20 +32,14 @@ import org.apache.lucene.document.TextFi
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.index.codecs.Codec;
 import org.apache.lucene.index.codecs.DefaultDocValuesFormat;
-import org.apache.lucene.index.codecs.DefaultFieldsFormat;
+import org.apache.lucene.index.codecs.DefaultStoredFieldsFormat;
 import org.apache.lucene.index.codecs.DefaultSegmentInfosFormat;
 import org.apache.lucene.index.codecs.DocValuesFormat;
-import org.apache.lucene.index.codecs.FieldsFormat;
+import org.apache.lucene.index.codecs.StoredFieldsFormat;
 import org.apache.lucene.index.codecs.PostingsFormat;
 import org.apache.lucene.index.codecs.SegmentInfosFormat;
 import org.apache.lucene.index.codecs.lucene40.Lucene40Codec;
-import org.apache.lucene.index.codecs.lucene40.Lucene40PostingsBaseFormat;
-import org.apache.lucene.index.codecs.lucene40.Lucene40PostingsFormat;
-import org.apache.lucene.index.codecs.mocksep.MockSepPostingsFormat;
-import org.apache.lucene.index.codecs.perfield.PerFieldPostingsFormat;
 import org.apache.lucene.index.codecs.pulsing.Pulsing40PostingsFormat;
-import org.apache.lucene.index.codecs.pulsing.PulsingPostingsFormat;
-import org.apache.lucene.index.codecs.simpletext.SimpleTextPostingsFormat;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.store.AlreadyClosedException;
@@ -69,7 +63,6 @@ public class TestAddIndexes extends Luce
     writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT,
         new MockAnalyzer(random))
         .setOpenMode(OpenMode.CREATE));
-    writer.setInfoStream(VERBOSE ? System.out : null);
     // add 100 documents
     addDocs(writer, 100);
     assertEquals(100, writer.maxDoc());
@@ -115,7 +108,7 @@ public class TestAddIndexes extends Luce
     assertEquals(40, writer.maxDoc());
     writer.close();
 
-    // test doc count before segments are merged/index is optimized
+    // test doc count before segments are merged
     writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
     assertEquals(190, writer.maxDoc());
     writer.addIndexes(aux3);
@@ -129,9 +122,9 @@ public class TestAddIndexes extends Luce
 
     verifyTermDocs(dir, new Term("content", "bbb"), 50);
 
-    // now optimize it.
+    // now fully merge it.
     writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
-    writer.optimize();
+    writer.forceMerge(1);
     writer.close();
 
     // make sure the new index is correct
@@ -171,7 +164,6 @@ public class TestAddIndexes extends Luce
 
     setUpDirs(dir, aux);
     IndexWriter writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
-    writer.setInfoStream(VERBOSE ? System.out : null);
     writer.addIndexes(aux);
 
     // Adds 10 docs, then replaces them with another 10
@@ -188,7 +180,7 @@ public class TestAddIndexes extends Luce
     q.add(new Term("content", "14"));
     writer.deleteDocuments(q);
 
-    writer.optimize();
+    writer.forceMerge(1);
     writer.commit();
 
     verifyNumDocs(dir, 1039);
@@ -226,7 +218,7 @@ public class TestAddIndexes extends Luce
     q.add(new Term("content", "14"));
     writer.deleteDocuments(q);
 
-    writer.optimize();
+    writer.forceMerge(1);
     writer.commit();
 
     verifyNumDocs(dir, 1039);
@@ -264,7 +256,7 @@ public class TestAddIndexes extends Luce
 
     writer.addIndexes(aux);
 
-    writer.optimize();
+    writer.forceMerge(1);
     writer.commit();
 
     verifyNumDocs(dir, 1039);
@@ -465,7 +457,6 @@ public class TestAddIndexes extends Luce
             setMaxBufferedDocs(100).
             setMergePolicy(newLogMergePolicy(10))
     );
-    writer.setInfoStream(VERBOSE ? System.out : null);
     writer.addIndexes(aux);
     assertEquals(30, writer.maxDoc());
     assertEquals(3, writer.getSegmentCount());
@@ -655,7 +646,6 @@ public class TestAddIndexes extends Luce
 
       dir2 = newDirectory();
       writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
-      writer2.setInfoStream(VERBOSE ? System.out : null);
       writer2.commit();
       
 
@@ -733,10 +723,10 @@ public class TestAddIndexes extends Luce
       switch(j%5) {
       case 0:
         if (VERBOSE) {
-          System.out.println(Thread.currentThread().getName() + ": TEST: addIndexes(Dir[]) then optimize");
+          System.out.println(Thread.currentThread().getName() + ": TEST: addIndexes(Dir[]) then full merge");
         }
         writer2.addIndexes(dirs);
-        writer2.optimize();
+        writer2.forceMerge(1);
         break;
       case 1:
         if (VERBOSE) {
@@ -773,7 +763,6 @@ public class TestAddIndexes extends Luce
     final int NUM_ITER = TEST_NIGHTLY ? 15 : 5;
     final int NUM_COPY = 3;
     CommitAndAddIndexes c = new CommitAndAddIndexes(NUM_COPY);
-    c.writer2.setInfoStream(VERBOSE ? System.out : null);
     c.launchThreads(NUM_ITER);
 
     for(int i=0;i<100;i++)
@@ -839,10 +828,10 @@ public class TestAddIndexes extends Luce
       switch(j%5) {
       case 0:
         if (VERBOSE) {
-          System.out.println("TEST: " + Thread.currentThread().getName() + ": addIndexes + optimize");
+          System.out.println("TEST: " + Thread.currentThread().getName() + ": addIndexes + full merge");
         }
         writer2.addIndexes(dirs);
-        writer2.optimize();
+        writer2.forceMerge(1);
         break;
       case 1:
         if (VERBOSE) {
@@ -858,9 +847,9 @@ public class TestAddIndexes extends Luce
         break;
       case 3:
         if (VERBOSE) {
-          System.out.println("TEST: " + Thread.currentThread().getName() + ": optimize");
+          System.out.println("TEST: " + Thread.currentThread().getName() + ": full merge");
         }
-        writer2.optimize();
+        writer2.forceMerge(1);
         break;
       case 4:
         if (VERBOSE) {
@@ -898,9 +887,6 @@ public class TestAddIndexes extends Luce
 
     final int NUM_COPY = 50;
     CommitAndAddIndexes3 c = new CommitAndAddIndexes3(NUM_COPY);
-    if (VERBOSE) {
-      c.writer2.setInfoStream(System.out);
-    }
     c.launchThreads(-1);
 
     Thread.sleep(_TestUtil.nextInt(random, 10, 500));
@@ -1088,7 +1074,6 @@ public class TestAddIndexes extends Luce
     lmp.setUseCompoundFile(true);
     lmp.setNoCFSRatio(1.0); // Force creation of CFS
     IndexWriter w3 = new IndexWriter(dir, conf);
-    w3.setInfoStream(VERBOSE ? System.out : null);
     w3.addIndexes(readers);
     w3.close();
     // we should now see segments_X,
@@ -1172,8 +1157,8 @@ public class TestAddIndexes extends Luce
     }
 
     @Override
-    public FieldsFormat fieldsFormat() {
-      return new DefaultFieldsFormat();
+    public StoredFieldsFormat storedFieldsFormat() {
+      return new DefaultStoredFieldsFormat();
     }
 
     @Override
@@ -1223,7 +1208,7 @@ public class TestAddIndexes extends Luce
     }
 
     try {
-      IndexReader indexReader = IndexReader.open(toAdd);
+      IndexReader.open(toAdd);
       fail("no such codec");
     } catch (IllegalArgumentException ex) {
       // expected

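For illustration only (not part of this patch): the codec API piece that handles stored fields is renamed from FieldsFormat to StoredFieldsFormat, so the custom codec in this test overrides the new method instead. A minimal sketch of the override, placed inside that codec subclass exactly as the hunk above shows:

    // sketch: delegate stored fields handling to the default implementation
    @Override
    public StoredFieldsFormat storedFieldsFormat() {
      return new DefaultStoredFieldsFormat();
    }
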
Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestAtomicUpdate.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestAtomicUpdate.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestAtomicUpdate.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestAtomicUpdate.java Mon Nov 14 22:36:20 2011
@@ -131,7 +131,6 @@ public class TestAtomicUpdate extends Lu
         .setMaxBufferedDocs(7);
     ((TieredMergePolicy) conf.getMergePolicy()).setMaxMergeAtOnce(3);
     IndexWriter writer = new MockIndexWriter(directory, conf);
-    writer.setInfoStream(VERBOSE ? System.out : null);
 
     // Establish a base index of 100 docs:
     for(int i=0;i<100;i++) {

Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java Mon Nov 14 22:36:20 2011
@@ -79,16 +79,16 @@ public class TestBackwardsCompatibility 
   
 /*
   // These are only needed for the special upgrade test to verify
-  // that also optimized indexes are correctly upgraded by IndexUpgrader.
+  // that single-segment indexes are also correctly upgraded by IndexUpgrader.
 // You don't need them to be built for non-3.1 (the test is happy with just one
   // "old" segment format, version is unimportant:
   
-  public void testCreateOptimizedCFS() throws IOException {
-    createIndex("index.optimized.cfs", true, true);
+  public void testCreateSingleSegmentCFS() throws IOException {
+    createIndex("index.singlesegment.cfs", true, true);
   }
 
-  public void testCreateOptimizedNoCFS() throws IOException {
-    createIndex("index.optimized.nocfs", false, true);
+  public void testCreateSingleSegmentNoCFS() throws IOException {
+    createIndex("index.singlesegment.nocfs", false, true);
   }
 
 */  
@@ -118,8 +118,8 @@ public class TestBackwardsCompatibility 
                                      "29.nocfs",
   };
   
-  final String[] oldOptimizedNames = {"31.optimized.cfs",
-                                      "31.optimized.nocfs",
+  final String[] oldSingleSegmentNames = {"31.optimized.cfs",
+                                          "31.optimized.nocfs",
   };
   
   /** This test checks that *only* IndexFormatTooOldExceptions are thrown when you open and operate on too old indexes! */
@@ -180,7 +180,7 @@ public class TestBackwardsCompatibility 
     }
   }
   
-  public void testOptimizeOldIndex() throws Exception {
+  public void testFullyMergeOldIndex() throws Exception {
     for(int i=0;i<oldNames.length;i++) {
       if (VERBOSE) {
         System.out.println("\nTEST: index=" + oldNames[i]);
@@ -191,8 +191,7 @@ public class TestBackwardsCompatibility 
 
       IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
           TEST_VERSION_CURRENT, new MockAnalyzer(random)));
-      w.setInfoStream(VERBOSE ? System.out : null);
-      w.optimize();
+      w.forceMerge(1);
       w.close();
       
       dir.close();
@@ -351,7 +350,6 @@ public class TestBackwardsCompatibility 
     Directory dir = newFSDirectory(oldIndexDir);
     // open writer
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
-    writer.setInfoStream(VERBOSE ? System.out : null);
     // add 10 docs
     for(int i=0;i<10;i++) {
       addDoc(writer, 35+i);
@@ -395,9 +393,9 @@ public class TestBackwardsCompatibility 
     doTestHits(hits, 43, searcher.getIndexReader());
     searcher.close();
 
-    // optimize
+    // fully merge
     writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
-    writer.optimize();
+    writer.forceMerge(1);
     writer.close();
 
     searcher = new IndexSearcher(dir, true);
@@ -441,9 +439,9 @@ public class TestBackwardsCompatibility 
     doTestHits(hits, 33, searcher.getIndexReader());
     searcher.close();
 
-    // optimize
+    // fully merge
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
-    writer.optimize();
+    writer.forceMerge(1);
     writer.close();
 
     searcher = new IndexSearcher(dir, true);
@@ -457,7 +455,7 @@ public class TestBackwardsCompatibility 
     dir.close();
   }
 
-  public File createIndex(String dirName, boolean doCFS, boolean optimized) throws IOException {
+  public File createIndex(String dirName, boolean doCFS, boolean fullyMerged) throws IOException {
     // we use a real directory name that is not cleaned up, because this method is only used to create backwards indexes:
     File indexDir = new File(LuceneTestCase.TEMP_DIR, dirName);
     _TestUtil.rmDir(indexDir);
@@ -474,12 +472,12 @@ public class TestBackwardsCompatibility 
       addDoc(writer, i);
     }
     assertEquals("wrong doc count", 35, writer.maxDoc());
-    if (optimized) {
-      writer.optimize();
+    if (fullyMerged) {
+      writer.forceMerge(1);
     }
     writer.close();
 
-    if (!optimized) {
+    if (!fullyMerged) {
       // open fresh writer so we get no prx file in the added segment
       mp = new LogByteSizeMergePolicy();
       mp.setUseCompoundFile(doCFS);
@@ -710,12 +708,12 @@ public class TestBackwardsCompatibility 
       assertEquals("wrong number of hits", 34, hits.length);
       
       // check decoding into field cache
-      int[] fci = FieldCache.DEFAULT.getInts(searcher.getIndexReader(), "trieInt");
+      int[] fci = FieldCache.DEFAULT.getInts(searcher.getIndexReader(), "trieInt", false);
       for (int val : fci) {
         assertTrue("value in id bounds", val >= 0 && val < 35);
       }
       
-      long[] fcl = FieldCache.DEFAULT.getLongs(searcher.getIndexReader(), "trieLong");
+      long[] fcl = FieldCache.DEFAULT.getLongs(searcher.getIndexReader(), "trieLong", false);
       for (long val : fcl) {
         assertTrue("value in id bounds", val >= 0L && val < 35L);
       }
@@ -745,9 +743,9 @@ public class TestBackwardsCompatibility 
   }
 
   public void testUpgradeOldIndex() throws Exception {
-    List<String> names = new ArrayList<String>(oldNames.length + oldOptimizedNames.length);
+    List<String> names = new ArrayList<String>(oldNames.length + oldSingleSegmentNames.length);
     names.addAll(Arrays.asList(oldNames));
-    names.addAll(Arrays.asList(oldOptimizedNames));
+    names.addAll(Arrays.asList(oldSingleSegmentNames));
     for(String name : names) {
       if (VERBOSE) {
         System.out.println("testUpgradeOldIndex: index=" +name);
@@ -756,7 +754,7 @@ public class TestBackwardsCompatibility 
       _TestUtil.unzip(getDataFile("index." + name + ".zip"), oldIndxeDir);
       Directory dir = newFSDirectory(oldIndxeDir);
 
-      new IndexUpgrader(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, null), VERBOSE ? System.out : null, false)
+      new IndexUpgrader(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, null), false)
         .upgrade();
 
       checkAllSegmentsUpgraded(dir);
@@ -766,16 +764,16 @@ public class TestBackwardsCompatibility 
     }
   }
 
-  public void testUpgradeOldOptimizedIndexWithAdditions() throws Exception {
-    for (String name : oldOptimizedNames) {
+  public void testUpgradeOldSingleSegmentIndexWithAdditions() throws Exception {
+    for (String name : oldSingleSegmentNames) {
       if (VERBOSE) {
-        System.out.println("testUpgradeOldOptimizedIndexWithAdditions: index=" +name);
+        System.out.println("testUpgradeOldSingleSegmentIndexWithAdditions: index=" +name);
       }
       File oldIndxeDir = _TestUtil.getTempDir(name);
       _TestUtil.unzip(getDataFile("index." + name + ".zip"), oldIndxeDir);
       Directory dir = newFSDirectory(oldIndxeDir);
 
-      assertEquals("Original index must be optimized", 1, getNumberOfSegments(dir));
+      assertEquals("Original index must be single segment", 1, getNumberOfSegments(dir));
 
       // create a bunch of dummy segments
       int id = 40;
@@ -793,19 +791,19 @@ public class TestBackwardsCompatibility 
         w.close(false);
       }
       
-      // add dummy segments (which are all in current version) to optimized index
+      // add dummy segments (which are all in current
+      // version) to single segment index
       MergePolicy mp = random.nextBoolean() ? newLogMergePolicy() : newTieredMergePolicy();
       IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, null)
         .setMergePolicy(mp);
       IndexWriter w = new IndexWriter(dir, iwc);
-      w.setInfoStream(VERBOSE ? System.out : null);
       w.addIndexes(ramDir);
       w.close(false);
       
       // determine count of segments in modified index
       final int origSegCount = getNumberOfSegments(dir);
       
-      new IndexUpgrader(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, null), VERBOSE ? System.out : null, false)
+      new IndexUpgrader(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, null), false)
         .upgrade();
 
       final int segCount = checkAllSegmentsUpgraded(dir);

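For illustration only (not part of this patch): two API shifts show up in this test. IndexUpgrader no longer takes a PrintStream argument (verbose output presumably comes from the default InfoStream instead; that is an assumption, not stated in the hunk), and the FieldCache getInts/getLongs calls gain an extra boolean parameter. A minimal sketch mirroring the hunks above:

    // sketch: upgrade in place, then sanity-check a field-cache lookup;
    // the trailing boolean is passed as false exactly as the test does
    new IndexUpgrader(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, null), false).upgrade();
    int[] fci = FieldCache.DEFAULT.getInts(searcher.getIndexReader(), "trieInt", false);
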
Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestCheckIndex.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestCheckIndex.java?rev=1201946&r1=1201945&r2=1201946&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestCheckIndex.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/TestCheckIndex.java Mon Nov 14 22:36:20 2011
@@ -45,7 +45,7 @@ public class TestCheckIndex extends Luce
     for(int i=0;i<19;i++) {
       writer.addDocument(doc);
     }
-    writer.optimize();
+    writer.forceMerge(1);
     writer.close();
     IndexReader reader = IndexReader.open(dir, false);
     reader.deleteDocument(5);