Posted to commits@lucene.apache.org by rm...@apache.org on 2010/07/26 23:35:46 UTC

svn commit: r979453 [1/3] - in /lucene/dev/trunk: ./ lucene/ lucene/contrib/ lucene/contrib/highlighter/src/test/ lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/ lucene/contrib/memory/src/test/org/apache/lucene/index/memory/ ...

Author: rmuir
Date: Mon Jul 26 21:35:43 2010
New Revision: 979453

URL: http://svn.apache.org/viewvc?rev=979453&view=rev
Log:
LUCENE-2554: preflex codec doesn't order terms correctly
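
For context: pre-flex (3.x) indexes store terms in UTF-16 code-unit order, while the
flex (4.0) APIs expect Unicode code-point order, and the two disagree whenever
supplementary (non-BMP) characters are involved. Below is a minimal, illustrative
sketch (not part of this commit) showing the disagreement with the two BytesRef
comparators this change deals with; the class name and the sample characters are
invented for demonstration only.

    import java.util.Comparator;
    import org.apache.lucene.util.BytesRef;

    // Illustrative sketch: U+10400 is a supplementary character (a surrogate pair in
    // UTF-16); U+FFFD is a BMP character above the surrogate range.  The two
    // comparators order them differently.
    public class TermOrderSketch {
      public static void main(String[] args) throws Exception {
        BytesRef nonBmp  = new BytesRef(new String(Character.toChars(0x10400)).getBytes("UTF-8"));
        BytesRef highBmp = new BytesRef("\uFFFD".getBytes("UTF-8"));

        Comparator<BytesRef> utf16   = BytesRef.getUTF8SortedAsUTF16Comparator();   // pre-flex order
        Comparator<BytesRef> unicode = BytesRef.getUTF8SortedAsUnicodeComparator(); // flex order

        System.out.println(utf16.compare(nonBmp, highBmp));   // < 0: the surrogate pair sorts first
        System.out.println(unicode.compare(nonBmp, highBmp)); // > 0: code-point order reverses it
      }
    }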

Added:
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/codecs/preflexrw/
      - copied from r979432, lucene/dev/branches/preflexfixes/lucene/src/test/org/apache/lucene/index/codecs/preflexrw/
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/codecs/preflexrw/PreFlexFieldsWriter.java
      - copied unchanged from r979432, lucene/dev/branches/preflexfixes/lucene/src/test/org/apache/lucene/index/codecs/preflexrw/PreFlexFieldsWriter.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/codecs/preflexrw/PreFlexRWCodec.java
      - copied unchanged from r979432, lucene/dev/branches/preflexfixes/lucene/src/test/org/apache/lucene/index/codecs/preflexrw/PreFlexRWCodec.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/codecs/preflexrw/TermInfosWriter.java
      - copied unchanged from r979432, lucene/dev/branches/preflexfixes/lucene/src/test/org/apache/lucene/index/codecs/preflexrw/TermInfosWriter.java
Removed:
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/codecs/preflex/TermInfosWriter.java
Modified:
    lucene/dev/trunk/   (props changed)
    lucene/dev/trunk/lucene/   (props changed)
    lucene/dev/trunk/lucene/build.xml   (props changed)
    lucene/dev/trunk/lucene/common-build.xml
    lucene/dev/trunk/lucene/contrib/   (props changed)
    lucene/dev/trunk/lucene/contrib/CHANGES.txt   (props changed)
    lucene/dev/trunk/lucene/contrib/highlighter/src/test/   (props changed)
    lucene/dev/trunk/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java   (props changed)
    lucene/dev/trunk/lucene/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
    lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java
    lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java
    lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java
    lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java
    lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java
    lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestRegexQuery.java
    lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/analysis/Tokenizer.java   (props changed)
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/MultiTermsEnum.java
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/CodecProvider.java
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/FieldsConsumer.java
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexCodec.java
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexFields.java
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/preflex/SegmentTermEnum.java
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/preflex/TermBuffer.java
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/preflex/TermInfo.java
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/preflex/TermInfosReader.java
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java   (props changed)
    lucene/dev/trunk/lucene/src/java/org/apache/lucene/util/BytesRef.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/document/TestBinaryDocument.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/document/TestDateTools.java   (props changed)
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/document/TestDocument.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/document/TestNumberTools.java   (props changed)
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/RandomIndexWriter.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java   (props changed)
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestCodecs.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestFlex.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexReader.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestMultiFields.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestRollback.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestSegmentTermEnum.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/index/codecs/preflex/TestSurrogates.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/BaseTestRangeFilter.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestAutomatonQuery.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestBoolean2.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestBooleanOr.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestBooleanPrefixQuery.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestBooleanQuery.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestBooleanScorer.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestCustomSearcherSort.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestDateFilter.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestDateSort.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestDocBoost.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestDocIdSet.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestExplanations.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestFieldCache.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestNot.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestPrefixFilter.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestPrefixQuery.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestPrefixRandom.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestRegexpQuery.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestRegexpRandom.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestRegexpRandom2.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestSimilarity.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestSort.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestSpanQueryFilter.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestTermRangeFilter.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestTermScorer.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestTermVectors.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestTopDocsCollector.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestTopScoreDocCollector.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestWildcard.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/TestWildcardRandom.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/spans/TestBasics.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/spans/TestSpans.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced2.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/LuceneTestCase.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/LuceneTestCaseJ4.java
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/TestAttributeSource.java   (props changed)
    lucene/dev/trunk/lucene/src/test/org/apache/lucene/util/_TestUtil.java
    lucene/dev/trunk/modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestISOLatin1AccentFilter.java   (props changed)
    lucene/dev/trunk/solr/   (props changed)
    lucene/dev/trunk/solr/build.xml
    lucene/dev/trunk/solr/common-build.xml
    lucene/dev/trunk/solr/lib/commons-httpclient-3.1.jar   (props changed)
    lucene/dev/trunk/solr/lib/jcl-over-slf4j-1.5.5.jar   (props changed)
    lucene/dev/trunk/solr/src/common/org/apache/solr/common/   (props changed)
    lucene/dev/trunk/solr/src/maven/solr-core-pom.xml.template   (props changed)
    lucene/dev/trunk/solr/src/maven/solr-solrj-pom.xml.template   (props changed)
    lucene/dev/trunk/solr/src/solrj/org/   (props changed)
    lucene/dev/trunk/solr/src/test/org/apache/solr/client/   (props changed)
    lucene/dev/trunk/solr/src/webapp/src/org/apache/solr/client/solrj/embedded/   (props changed)

Propchange: lucene/dev/trunk/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Mon Jul 26 21:35:43 2010
@@ -1 +1,2 @@
 /lucene/dev/branches/branch_3x:949730,957490,961612,979161
+/lucene/dev/branches/preflexfixes:967125-979432

Propchange: lucene/dev/trunk/lucene/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Mon Jul 26 21:35:43 2010
@@ -1,4 +1,5 @@
 /lucene/dev/branches/branch_3x/lucene:943137,949730,957490,960490,961612,979161
+/lucene/dev/branches/preflexfixes/lucene:967125-979432
 /lucene/java/branches/flex_1458:824912-931101
 /lucene/java/branches/lucene_2_4:748824
 /lucene/java/branches/lucene_2_9:817269-818600,829134,829881,831036,896850,909334,948516

Propchange: lucene/dev/trunk/lucene/build.xml
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Mon Jul 26 21:35:43 2010
@@ -1,4 +1,5 @@
 /lucene/dev/branches/branch_3x/lucene/build.xml:943137,949730,957490,960490,961612,979161
+/lucene/dev/branches/preflexfixes/lucene/build.xml:967125-979432
 /lucene/java/branches/flex_1458/build.xml:824912-931101
 /lucene/java/branches/lucene_2_9/build.xml:909334,948516
 /lucene/java/trunk/build.xml:924483-924731,924781,925176-925462

Modified: lucene/dev/trunk/lucene/common-build.xml
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/common-build.xml?rev=979453&r1=979452&r2=979453&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/common-build.xml (original)
+++ lucene/dev/trunk/lucene/common-build.xml Mon Jul 26 21:35:43 2010
@@ -56,6 +56,7 @@
   <property name="args" value=""/>
   <property name="threadsPerProcessor" value="1" />
   <property name="random.multiplier" value="1" />
+  <property name="tests.codec" value="random" />
     
   <property name="javac.deprecation" value="off"/>
   <property name="javac.debug" value="on"/>
@@ -434,6 +435,8 @@
 	
 	      <!-- allow tests to control debug prints -->
 	      <sysproperty key="tests.verbose" value="${tests.verbose}"/>
+              <!-- set the codec tests should run with -->
+	      <sysproperty key="tests.codec" value="${tests.codec}"/>
 	
 	      <!-- TODO: create propertyset for test properties, so each project can have its own set -->
               <sysproperty key="random.multiplier" value="${random.multiplier}"/>
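
Usage note: the new tests.codec property can be set on the command line (for example,
ant test -Dtests.codec=Standard) and reaches each test JVM through the sysproperty
above. The snippet below is a rough sketch of how test code might resolve it; only the
property name, its "random" default and CORE_CODECS come from this commit, the rest is
illustrative.

    import java.util.Random;
    import org.apache.lucene.index.codecs.CodecProvider;

    // Sketch only: resolve -Dtests.codec inside a test JVM.
    public class ResolveTestCodec {
      public static void main(String[] args) {
        String codecName = System.getProperty("tests.codec", "random");
        if ("random".equals(codecName)) {
          // pick one of the core codecs at random, as the default setting implies
          codecName = CodecProvider.CORE_CODECS[new Random().nextInt(CodecProvider.CORE_CODECS.length)];
        }
        System.out.println("tests.codec resolved to: " + codecName);
      }
    }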

Propchange: lucene/dev/trunk/lucene/contrib/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Mon Jul 26 21:35:43 2010
@@ -1,4 +1,5 @@
 /lucene/dev/branches/branch_3x/lucene/contrib:943137,949730,957490,960490,961612,979161
+/lucene/dev/branches/preflexfixes/lucene/contrib:967125-979432
 /lucene/java/branches/flex_1458/contrib:824912-931101
 /lucene/java/branches/lucene_2_4/contrib:748824
 /lucene/java/branches/lucene_2_9/contrib:817269-818600,825998,829134,829816,829881,831036,896850,909334,948516

Propchange: lucene/dev/trunk/lucene/contrib/CHANGES.txt
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Mon Jul 26 21:35:43 2010
@@ -1,4 +1,5 @@
 /lucene/dev/branches/branch_3x/lucene/contrib/CHANGES.txt:943137,949730,957490,960490,961612,979161
+/lucene/dev/branches/preflexfixes/lucene/contrib/CHANGES.txt:967125-979432
 /lucene/java/branches/flex_1458/contrib/CHANGES.txt:824912-931101
 /lucene/java/branches/lucene_2_4/contrib/CHANGES.txt:748824
 /lucene/java/branches/lucene_2_9/contrib/CHANGES.txt:817269-818600,825998,826775,829134,829816,829881,831036,896850,948516

Propchange: lucene/dev/trunk/lucene/contrib/highlighter/src/test/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Mon Jul 26 21:35:43 2010
@@ -1,4 +1,5 @@
 /lucene/dev/branches/branch_3x/lucene/contrib/highlighter/src/test:943137,949730,957490,960490,961612,979161
+/lucene/dev/branches/preflexfixes/lucene/contrib/highlighter/src/test:967125-979432
 /lucene/java/branches/flex_1458/contrib/highlighter/src/test:824912-931101
 /lucene/java/branches/lucene_2_4/contrib/highlighter/src/test:748824
 /lucene/java/branches/lucene_2_9/contrib/highlighter/src/test:817269-818600,825998,826775,829134,829816,829881,831036,896850,909334,948516

Propchange: lucene/dev/trunk/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Mon Jul 26 21:35:43 2010
@@ -1,4 +1,5 @@
 /lucene/dev/branches/branch_3x/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java:943137,949730,957490,960490,961612,979161
+/lucene/dev/branches/preflexfixes/lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java:967125-979432
 /lucene/java/branches/flex_1458/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java:824912-931101
 /lucene/java/branches/lucene_2_9/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java:909334,948516
 /lucene/java/trunk/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java:924483-924731,924781,925176-925462

Modified: lucene/dev/trunk/lucene/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java?rev=979453&r1=979452&r2=979453&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java (original)
+++ lucene/dev/trunk/lucene/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java Mon Jul 26 21:35:43 2010
@@ -33,6 +33,7 @@ import org.apache.lucene.analysis.MockTo
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.queryParser.QueryParser;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.TopDocs;
@@ -107,8 +108,8 @@ public class MemoryIndexTest extends Bas
     
     RAMDirectory ramdir = new RAMDirectory();
     Analyzer analyzer = randomAnalyzer();
-    IndexWriter writer = new IndexWriter(ramdir, analyzer,
-        IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter writer = new IndexWriter(ramdir,
+                                         new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer).setCodecProvider(_TestUtil.alwaysCodec("Standard")));
     Document doc = new Document();
     Field field1 = new Field("foo", fooField.toString(), Field.Store.NO, Field.Index.ANALYZED);
     Field field2 = new Field("term", termField.toString(), Field.Store.NO, Field.Index.ANALYZED);
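
The change above is representative of many of the test updates in this commit: a test
that must not run under a randomly selected codec pins one explicitly through
_TestUtil.alwaysCodec. A hedged sketch of that pattern follows; the class and test
names are invented, and TEST_VERSION_CURRENT is assumed to be inherited from
LuceneTestCase.

    import org.apache.lucene.analysis.MockAnalyzer;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.LuceneTestCase;
    import org.apache.lucene.util._TestUtil;

    // Sketch only: pin a test to the Standard codec so the random tests.codec
    // selection cannot hand it an incompatible one.
    public class PinnedCodecTest extends LuceneTestCase {
      public void testWithStandardCodec() throws Exception {
        IndexWriter writer = new IndexWriter(new RAMDirectory(),
            new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
                .setCodecProvider(_TestUtil.alwaysCodec("Standard")));
        // ... add documents, search, assert ...
        writer.close();
      }
    }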

Modified: lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java?rev=979453&r1=979452&r2=979453&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java (original)
+++ lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java Mon Jul 26 21:35:43 2010
@@ -18,13 +18,13 @@ package org.apache.lucene.search;
  */
 
 import java.io.IOException;
+import java.util.Random;
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.RAMDirectory;
@@ -38,8 +38,7 @@ public class BooleanFilterTest extends L
 	protected void setUp() throws Exception {
 	  super.setUp();
 		directory = new RAMDirectory();
-		RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory, new IndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));
+		RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory, new MockAnalyzer(MockTokenizer.WHITESPACE, false));
 		
 		//Add series of docs with filterable fields : acces rights, prices, dates and "in-stock" flags
 		addDoc(writer, "admin guest", "010", "20040101","Y");

Modified: lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java?rev=979453&r1=979452&r2=979453&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java (original)
+++ lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java Mon Jul 26 21:35:43 2010
@@ -21,11 +21,9 @@ import java.util.Calendar;
 import java.util.GregorianCalendar;
 import java.util.Random;
 
-import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanClause;
@@ -63,9 +61,7 @@ public class ChainedFilterTest extends L
     super.setUp();
     random = newRandom();
     directory = new RAMDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random, directory, 
-        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
-
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory);
     Calendar cal = new GregorianCalendar();
     cal.clear();
     cal.setTimeInMillis(1041397200000L); // 2003 January 01
@@ -200,8 +196,7 @@ public class ChainedFilterTest extends L
   
   public void testWithCachingFilter() throws Exception {
     Directory dir = new RAMDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random, dir, 
-        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir);
     IndexReader reader = writer.getReader();
     writer.close();
   

Modified: lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java?rev=979453&r1=979452&r2=979453&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java (original)
+++ lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java Mon Jul 26 21:35:43 2010
@@ -20,11 +20,9 @@ package org.apache.lucene.search;
 import java.io.IOException;
 import java.util.HashSet;
 
-import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.DocsEnum;
@@ -44,8 +42,7 @@ public class DuplicateFilterTest extends
 	protected void setUp() throws Exception {
     super.setUp();
 		directory = new RAMDirectory();
-		RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory, 
-		    new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+		RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory);
 		
 		//Add series of docs with filterable fields : url, text and dates  flags
 		addDoc(writer, "http://lucene.apache.org", "lucene 1.4.3 available", "20040101");

Modified: lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java?rev=979453&r1=979452&r2=979453&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java (original)
+++ lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java Mon Jul 26 21:35:43 2010
@@ -25,7 +25,6 @@ import org.apache.lucene.analysis.MockAn
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.RAMDirectory;
@@ -41,8 +40,7 @@ public class FuzzyLikeThisQueryTest exte
 	protected void setUp() throws Exception	{
 	  super.setUp();
 		directory = new RAMDirectory();
-		RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory, 
-		    new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
+		RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory);
 		
 		//Add series of docs with misspelt names
 		addDoc(writer, "jonathon smythe","1");

Modified: lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java?rev=979453&r1=979452&r2=979453&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java (original)
+++ lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java Mon Jul 26 21:35:43 2010
@@ -19,11 +19,9 @@ package org.apache.lucene.search;
 
 import java.util.HashSet;
 
-import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.RAMDirectory;
@@ -53,8 +51,7 @@ public class TermsFilterTest extends Luc
 	public void testMissingTerms() throws Exception {
 		String fieldName="field1";
 		RAMDirectory rd=new RAMDirectory();
-		RandomIndexWriter w = new RandomIndexWriter(newRandom(), rd, 
-		    new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+		RandomIndexWriter w = new RandomIndexWriter(newRandom(), rd);
 		for (int i = 0; i < 100; i++) {
 			Document doc=new Document();
 			int term=i*10; //terms are units of 10;

Modified: lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestRegexQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestRegexQuery.java?rev=979453&r1=979452&r2=979453&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestRegexQuery.java (original)
+++ lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestRegexQuery.java Mon Jul 26 21:35:43 2010
@@ -20,10 +20,8 @@ package org.apache.lucene.search.regex;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.search.IndexSearcher;
@@ -44,8 +42,7 @@ public class TestRegexQuery extends Luce
   protected void setUp() throws Exception {
     super.setUp();
     directory = new RAMDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory, 
-        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+    RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory);
     Document doc = new Document();
     doc.add(new Field(FN, "the quick brown fox jumps over the lazy dog", Field.Store.NO, Field.Index.ANALYZED));
     writer.addDocument(doc);

Modified: lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java?rev=979453&r1=979452&r2=979453&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java (original)
+++ lucene/dev/trunk/lucene/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java Mon Jul 26 21:35:43 2010
@@ -28,7 +28,6 @@ import org.apache.lucene.analysis.MockTo
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
@@ -46,8 +45,7 @@ public class TestMoreLikeThis extends Lu
   protected void setUp() throws Exception {
     super.setUp();
     directory = new RAMDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory, 
-        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+    RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory);
     
     // Add series of docs with specific information for MoreLikeThis
     addDoc(writer, "lucene");

Propchange: lucene/dev/trunk/lucene/src/java/org/apache/lucene/analysis/Tokenizer.java
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Mon Jul 26 21:35:43 2010
@@ -1,4 +1,5 @@
 /lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/analysis/Tokenizer.java:943137,949730,957490,960490,961612,979161
+/lucene/dev/branches/preflexfixes/lucene/src/java/org/apache/lucene/analysis/Tokenizer.java:967125-979432
 /lucene/java/branches/flex_1458/src/java/org/apache/lucene/analysis/Tokenizer.java:824912-931101
 /lucene/java/branches/lucene_2_9/src/java/org/apache/lucene/analysis/Tokenizer.java:909334,948516
 /lucene/java/trunk/src/java/org/apache/lucene/analysis/Tokenizer.java:924483-924731,924781,925176-925462

Modified: lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/MultiTermsEnum.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/MultiTermsEnum.java?rev=979453&r1=979452&r2=979453&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/MultiTermsEnum.java (original)
+++ lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/MultiTermsEnum.java Mon Jul 26 21:35:43 2010
@@ -116,7 +116,7 @@ public final class MultiTermsEnum extend
         // different TermComps
         final Comparator<BytesRef> subTermComp = termsEnumIndex.termsEnum.getComparator();
         if (subTermComp != null && !subTermComp.equals(termComp)) {
-          throw new IllegalStateException("sub-readers have different BytesRef.Comparators; cannot merge");
+          throw new IllegalStateException("sub-readers have different BytesRef.Comparators: " + subTermComp + " vs " + termComp + "; cannot merge");
         }
       }
 

Modified: lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/CodecProvider.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/CodecProvider.java?rev=979453&r1=979452&r2=979453&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/CodecProvider.java (original)
+++ lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/CodecProvider.java Mon Jul 26 21:35:43 2010
@@ -47,13 +47,12 @@ public abstract class CodecProvider {
 
   private static String defaultCodec = "Standard";
 
-  public final static String[] CORE_CODECS = new String[] {"Standard", "Sep", "Pulsing", "IntBlock"};
+  public final static String[] CORE_CODECS = new String[] {"Standard", "Sep", "Pulsing", "IntBlock", "PreFlex"};
 
   public void register(Codec codec) {
     if (codec.name == null) {
       throw new IllegalArgumentException("code.name is null");
     }
-
     if (!codecs.containsKey(codec.name)) {
       codecs.put(codec.name, codec);
       codec.getExtensions(knownExtensions);
@@ -61,6 +60,21 @@ public abstract class CodecProvider {
       throw new IllegalArgumentException("codec '" + codec.name + "' is already registered as a different codec instance");
     }
   }
+  
+  /** @lucene.internal */
+  public void unregister(Codec codec) {
+    if (codec.name == null) {
+      throw new IllegalArgumentException("code.name is null");
+    }
+    if (codecs.containsKey(codec.name)) {
+      Codec c = codecs.get(codec.name);
+      if (codec == c) {
+        codecs.remove(codec.name);
+      } else {
+        throw new IllegalArgumentException("codec '" + codec.name + "' is being impersonated by a different codec instance!!!");
+      }
+    }
+  }
 
   public Collection<String> getAllExtensions() {
     return knownExtensions;
@@ -111,8 +125,5 @@ class DefaultCodecProvider extends Codec
   @Override
   public Codec getWriter(SegmentWriteState state) {
     return lookup(CodecProvider.getDefaultCodec());
-    //return lookup("Pulsing");
-    //return lookup("Sep");
-    //return lookup("IntBlock");
   }
-}
\ No newline at end of file
+}
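
The new unregister method is meant to be paired with register, for example around
tests that temporarily install the test-only PreFlexRW codec added in this commit. A
rough sketch of that pairing follows; the helper is hypothetical, and it assumes a
no-arg PreFlexRWCodec constructor and a provider that does not already hold another
codec instance under the same name.

    import org.apache.lucene.index.codecs.Codec;
    import org.apache.lucene.index.codecs.CodecProvider;
    import org.apache.lucene.index.codecs.preflexrw.PreFlexRWCodec;

    // Hypothetical helper: bracket a test body with register()/unregister().
    public class CodecSwapSketch {
      static void withPreFlexRW(CodecProvider provider, Runnable body) {
        Codec rw = new PreFlexRWCodec();   // test-only writer codec (assumed no-arg ctor)
        provider.register(rw);             // throws if another instance already owns the name
        try {
          body.run();                      // index/search while the codec is installed
        } finally {
          provider.unregister(rw);         // new in this commit: leaves the provider clean
        }
      }
    }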

Modified: lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/FieldsConsumer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/FieldsConsumer.java?rev=979453&r1=979452&r2=979453&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/FieldsConsumer.java (original)
+++ lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/FieldsConsumer.java Mon Jul 26 21:35:43 2010
@@ -22,6 +22,7 @@ import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.FieldsEnum;
 
 import java.io.IOException;
+import java.io.Closeable;
 
 /** Abstract API that consumes terms, doc, freq, prox and
  *  payloads postings.  Concrete implementations of this
@@ -30,7 +31,7 @@ import java.io.IOException;
  *
  * @lucene.experimental
  */
-public abstract class FieldsConsumer {
+public abstract class FieldsConsumer implements Closeable {
 
   /** Add a new field */
   public abstract TermsConsumer addField(FieldInfo field) throws IOException;
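
With FieldsConsumer now implementing Closeable, callers can release it
deterministically. A minimal sketch follows; how the consumer is obtained is outside
this excerpt, so it is simply passed in.

    import java.io.IOException;
    import org.apache.lucene.index.codecs.FieldsConsumer;

    // Sketch only: guarantee cleanup of a FieldsConsumer via the Closeable contract.
    public class CloseConsumerSketch {
      static void writeAndClose(FieldsConsumer consumer) throws IOException {
        try {
          // ... consumer.addField(fieldInfo) and postings writes would go here ...
        } finally {
          consumer.close();  // available because FieldsConsumer now implements Closeable
        }
      }
    }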

Modified: lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexCodec.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexCodec.java?rev=979453&r1=979452&r2=979453&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexCodec.java (original)
+++ lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexCodec.java Mon Jul 26 21:35:43 2010
@@ -40,16 +40,16 @@ import org.apache.lucene.index.codecs.Fi
 public class PreFlexCodec extends Codec {
 
   /** Extension of terms file */
-  static final String TERMS_EXTENSION = "tis";
+  public static final String TERMS_EXTENSION = "tis";
 
   /** Extension of terms index file */
-  static final String TERMS_INDEX_EXTENSION = "tii";
+  public static final String TERMS_INDEX_EXTENSION = "tii";
 
   /** Extension of freq postings file */
-  static final String FREQ_EXTENSION = "frq";
+  public static final String FREQ_EXTENSION = "frq";
 
   /** Extension of prox postings file */
-  static final String PROX_EXTENSION = "prx";
+  public static final String PROX_EXTENSION = "prx";
 
   public PreFlexCodec() {
     name = "PreFlex";
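
Making the extension constants public lets code outside the preflex package (notably
the new test-only PreFlexRW writer) compose the legacy file names. A small sketch
using the same IndexFileNames call that PreFlexFields uses below; the segment name
"_1" is invented.

    import org.apache.lucene.index.IndexFileNames;
    import org.apache.lucene.index.codecs.preflex.PreFlexCodec;

    // Sketch only: build pre-flex file names from the now-public constants.
    public class PreFlexFileNames {
      public static void main(String[] args) {
        System.out.println(IndexFileNames.segmentFileName("_1", "", PreFlexCodec.TERMS_EXTENSION));       // expected: _1.tis
        System.out.println(IndexFileNames.segmentFileName("_1", "", PreFlexCodec.TERMS_INDEX_EXTENSION)); // expected: _1.tii
      }
    }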

Modified: lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexFields.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexFields.java?rev=979453&r1=979452&r2=979453&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexFields.java (original)
+++ lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexFields.java Mon Jul 26 21:35:43 2010
@@ -40,12 +40,11 @@ import org.apache.lucene.store.IndexInpu
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.UnicodeUtil;
-import org.apache.lucene.util.ArrayUtil;
 
 /** Exposes flex API on a pre-flex index, as a codec. 
  * @lucene.experimental */
 public class PreFlexFields extends FieldsProducer {
-
+  
   private static final boolean DEBUG_SURROGATES = false;
 
   public TermInfosReader tis;
@@ -60,7 +59,7 @@ public class PreFlexFields extends Field
   private final int readBufferSize;
   private Directory cfsReader;
 
-  PreFlexFields(Directory dir, FieldInfos fieldInfos, SegmentInfo info, int readBufferSize, int indexDivisor)
+  public PreFlexFields(Directory dir, FieldInfos fieldInfos, SegmentInfo info, int readBufferSize, int indexDivisor)
     throws IOException {
 
     si = info;
@@ -107,6 +106,15 @@ public class PreFlexFields extends Field
     this.dir = dir;
   }
 
+  // If this returns, we do the surrogates dance so that the
+  // terms are sorted by unicode sort order.  This should be
+  // true when segments are used for "normal" searching;
+  // it's only false during testing, to create a pre-flex
+  // index, using the test-only PreFlexRW.
+  protected boolean sortTermsByUnicode() {
+    return true;
+  }
+
   static void files(Directory dir, SegmentInfo info, Collection<String> files) throws IOException {
     files.add(IndexFileNames.segmentFileName(info.name, "", PreFlexCodec.TERMS_EXTENSION));
     files.add(IndexFileNames.segmentFileName(info.name, "", PreFlexCodec.TERMS_INDEX_EXTENSION));
@@ -182,6 +190,12 @@ public class PreFlexFields extends Field
     if (cfsReader != null) {
       cfsReader.close();
     }
+    if (freqStream != null) {
+      freqStream.close();
+    }
+    if (proxStream != null) {
+      proxStream.close();
+    }
   }
 
   private class PreFlexFieldsEnum extends FieldsEnum {
@@ -228,7 +242,11 @@ public class PreFlexFields extends Field
     public Comparator<BytesRef> getComparator() {
       // Pre-flex indexes always sorted in UTF16 order, but
       // we remap on-the-fly to unicode order
-      return BytesRef.getUTF8SortedAsUnicodeComparator();
+      if (sortTermsByUnicode()) {
+        return BytesRef.getUTF8SortedAsUnicodeComparator();
+      } else {
+        return BytesRef.getUTF8SortedAsUTF16Comparator();
+      }
     }
   }
 
@@ -238,119 +256,225 @@ public class PreFlexFields extends Field
     private boolean skipNext;
     private BytesRef current;
 
-    private int[] surrogateSeekPending = new int[1];
-    private boolean[] surrogateDidSeekBack = new boolean[1];
-    private int surrogateSeekUpto;
-    private char[] pendingPrefix;
-
     private SegmentTermEnum seekTermEnum;
     private Term protoTerm;
+    
+    private static final byte UTF8_NON_BMP_LEAD = (byte) 0xf0;
+    private static final byte UTF8_HIGH_BMP_LEAD = (byte) 0xee;
+
+    // Returns true if the unicode char is "after" the
+    // surrogates in UTF16, ie >= U+E000 and <= U+FFFF:
+    private final boolean isHighBMPChar(byte[] b, int idx) {
+      return (b[idx] & UTF8_HIGH_BMP_LEAD) == UTF8_HIGH_BMP_LEAD;
+    }
+
+    // Returns true if the unicode char in the UTF8 byte
+    // sequence starting at idx encodes a char outside of
+    // BMP (ie what would be a surrogate pair in UTF16):
+    private final boolean isNonBMPChar(byte[] b, int idx) {
+      return (b[idx] & UTF8_NON_BMP_LEAD) == UTF8_NON_BMP_LEAD;
+    }
+
+    private final byte[] scratch = new byte[4];
+    private final BytesRef prevTerm = new BytesRef();
+    private final BytesRef scratchTerm = new BytesRef();
     private int newSuffixStart;
 
-    void reset(FieldInfo fieldInfo) throws IOException {
-      this.fieldInfo = fieldInfo;
-      protoTerm = new Term(fieldInfo.name);
-      if (termEnum == null) {
-        termEnum = getTermsDict().terms(protoTerm);
-        seekTermEnum = getTermsDict().terms(protoTerm);
+    // Swap in S, in place of E:
+    private boolean seekToNonBMP(SegmentTermEnum te, BytesRef term, int pos) throws IOException {
+      final int savLength = term.length;
+
+      assert term.offset == 0;
+
+      // The 3 bytes starting at downTo make up 1
+      // unicode character:
+      assert isHighBMPChar(term.bytes, pos);
+
+      // NOTE: we cannot make this assert, because
+      // AutomatonQuery legitimately sends us malformed UTF8
+      // (eg the UTF8 bytes with just 0xee)
+      // assert term.length >= pos + 3: "term.length=" + term.length + " pos+3=" + (pos+3) + " byte=" + Integer.toHexString(term.bytes[pos]) + " term=" + term.toString();
+
+      // Save the bytes && length, since we need to
+      // restore this if seek "back" finds no matching
+      // terms
+      if (term.bytes.length < 4+pos) {
+        term.grow(4+pos);
+      }
+
+      scratch[0] = term.bytes[pos];
+      scratch[1] = term.bytes[pos+1];
+      scratch[2] = term.bytes[pos+2];
+
+      term.bytes[pos] = (byte) 0xf0;
+      term.bytes[pos+1] = (byte) 0x90;
+      term.bytes[pos+2] = (byte) 0x80;
+      term.bytes[pos+3] = (byte) 0x80;
+      term.length = 4+pos;
+
+      if (DEBUG_SURROGATES) {
+        System.out.println("      try seek term=" + UnicodeUtil.toHexString(term.utf8ToString()));
+      }
+
+      // Seek "back":
+      getTermsDict().seekEnum(te, protoTerm.createTerm(term));
+
+      // Test if the term we seek'd to in fact found a
+      // surrogate pair at the same position as the E:
+      Term t2 = te.term();
+
+      // Cannot be null (or move to next field) because at
+      // "worst" it'd seek to the same term we are on now,
+      // unless we are being called from seek
+      if (t2 == null || t2.field() != fieldInfo.name) {
+        return false;
+      }
+
+      if (DEBUG_SURROGATES) {
+        System.out.println("      got term=" + UnicodeUtil.toHexString(t2.text()));
+      }
+
+      // Now test if prefix is identical and we found
+      // a non-BMP char at the same position:
+      BytesRef b2 = t2.bytes();
+      assert b2.offset == 0;
+
+      boolean matches;
+      if (b2.length >= term.length && isNonBMPChar(b2.bytes, pos)) {
+        matches = true;
+        for(int i=0;i<pos;i++) {
+          if (term.bytes[i] != b2.bytes[i]) {
+            matches = false;
+            break;
+          }
+        }              
       } else {
-        getTermsDict().seekEnum(termEnum, protoTerm);
+        matches = false;
       }
-      skipNext = true;
-      
-      surrogateSeekUpto = 0;
-      newSuffixStart = 0;
 
-      surrogatesDance();
+      // Restore term:
+      term.length = savLength;
+      term.bytes[pos] = scratch[0];
+      term.bytes[pos+1] = scratch[1];
+      term.bytes[pos+2] = scratch[2];
+
+      return matches;
     }
 
-    private void surrogatesDance() throws IOException {
-      
-      // Tricky: prior to 4.0, Lucene index sorted terms in
-      // UTF16 order, but as of 4.0 we sort by Unicode code
-      // point order.  These orders differ because of the
-      // surrrogates; so we have to fixup our enum, here, by
-      // carefully first seeking past the surrogates and
-      // then back again at the end.  The process is
-      // recursive, since any given term could have multiple
-      // new occurrences of surrogate pairs, so we use a
-      // stack to record the pending seek-backs.
+    // Seek type 2 "continue" (back to the start of the
+    // surrogates): scan the stripped suffix from the
+    // prior term, backwards. If there was an E in that
+    // part, then we try to seek back to S.  If that
+    // seek finds a matching term, we go there.
+    private boolean doContinue() throws IOException {
+
       if (DEBUG_SURROGATES) {
-        System.out.println("  dance start term=" + (termEnum.term() == null ? null : UnicodeUtil.toHexString(termEnum.term().text())));
+        System.out.println("  try cont");
       }
 
-      while(popPendingSeek());
-      while(pushNewSurrogate());
-    }
+      int downTo = prevTerm.length-1;
 
-    // only for debugging
-    private String getStack() {
-      if (surrogateSeekUpto == 0) {
-        return "null";
-      } else {
-        StringBuffer sb = new StringBuffer();
-        for(int i=0;i<surrogateSeekUpto;i++) {
-          if (i > 0) {
-            sb.append(' ');
+      boolean didSeek = false;
+      
+      final int limit = Math.min(newSuffixStart, scratchTerm.length-1);
+
+      while(downTo > limit) {
+
+        if (isHighBMPChar(prevTerm.bytes, downTo)) {
+
+          if (DEBUG_SURROGATES) {
+            System.out.println("    found E pos=" + downTo + " vs len=" + prevTerm.length);
+          }
+
+          if (seekToNonBMP(seekTermEnum, prevTerm, downTo)) {
+            // TODO: more efficient seek?
+            getTermsDict().seekEnum(termEnum, seekTermEnum.term());
+            //newSuffixStart = downTo+4;
+            newSuffixStart = downTo;
+            scratchTerm.copy(termEnum.term().bytes());
+            didSeek = true;
+            if (DEBUG_SURROGATES) {
+              System.out.println("      seek!");
+            }
+            break;
+          } else {
+            if (DEBUG_SURROGATES) {
+              System.out.println("      no seek");
+            }
           }
-          sb.append(surrogateSeekPending[i]);
         }
-        sb.append(" pendingSeekText=" + new String(pendingPrefix, 0, surrogateSeekPending[surrogateSeekUpto-1]));
-        return sb.toString();
+
+        // Shorten prevTerm in place so that we don't redo
+        // this loop if we come back here:
+        if ((prevTerm.bytes[downTo] & 0xc0) == 0xc0 || (prevTerm.bytes[downTo] & 0x80) == 0) {
+          prevTerm.length = downTo;
+        }
+        
+        downTo--;
       }
+
+      return didSeek;
     }
 
-    private boolean popPendingSeek() throws IOException {
+    // Look for seek type 3 ("pop"): if the delta from
+    // prev -> current was replacing an S with an E,
+    // we must now seek to beyond that E.  This seek
+    // "finishes" the dance at this character
+    // position.
+    private boolean doPop() throws IOException {
+
       if (DEBUG_SURROGATES) {
-        System.out.println("  check pop newSuffix=" + newSuffixStart + " stack=" + getStack());
+        System.out.println("  try pop");
       }
-      // if a .next() has advanced beyond the
-      // after-surrogates range we had last seeked to, we
-      // must seek back to the start and resume .next from
-      // there.  this pops the pending seek off the stack.
-      final Term t = termEnum.term();
-      if (surrogateSeekUpto > 0) {
-        final int seekPrefix = surrogateSeekPending[surrogateSeekUpto-1];
+
+      assert newSuffixStart <= prevTerm.length;
+      assert newSuffixStart < scratchTerm.length || newSuffixStart == 0;
+
+      if (prevTerm.length > newSuffixStart &&
+          isNonBMPChar(prevTerm.bytes, newSuffixStart) &&
+          isHighBMPChar(scratchTerm.bytes, newSuffixStart)) {
+
+        // Seek type 2 -- put 0xFF at this position:
+        scratchTerm.bytes[newSuffixStart] = (byte) 0xff;
+        scratchTerm.length = newSuffixStart+1;
+
         if (DEBUG_SURROGATES) {
-          System.out.println("    seekPrefix=" + seekPrefix);
+          System.out.println("    seek to term=" + UnicodeUtil.toHexString(scratchTerm.utf8ToString()) + " " + scratchTerm.toString());
         }
-        if (newSuffixStart < seekPrefix) {
-          assert pendingPrefix != null;
-          assert pendingPrefix.length > seekPrefix;
-          pendingPrefix[seekPrefix] = UnicodeUtil.UNI_SUR_HIGH_START;
-          pendingPrefix[1+seekPrefix] = UnicodeUtil.UNI_SUR_LOW_START;
-          Term t2 = protoTerm.createTerm(new BytesRef(pendingPrefix, 0, 2+seekPrefix));
+          
+        // TODO: more efficient seek?  can we simply swap
+        // the enums?
+        getTermsDict().seekEnum(termEnum, protoTerm.createTerm(scratchTerm));
+
+        final Term t2 = termEnum.term();
+
+        // We could hit EOF or different field since this
+        // was a seek "forward":
+        if (t2 != null && t2.field() == fieldInfo.name) {
+
           if (DEBUG_SURROGATES) {
-            System.out.println("    do pop; seek back to " + UnicodeUtil.toHexString(t2.text()));
+            System.out.println("      got term=" + UnicodeUtil.toHexString(t2.text()) + " " + t2.bytes());
           }
-          getTermsDict().seekEnum(termEnum, t2);
-          surrogateDidSeekBack[surrogateSeekUpto-1] = true;
 
-          // +2 because we don't want to re-check the
-          // surrogates we just seek'd back to
-          newSuffixStart = seekPrefix + 2;
+          final BytesRef b2 = t2.bytes();
+          assert b2.offset == 0;
+
+
+          // Set newSuffixStart -- we can't use
+          // termEnum's since the above seek may have
+          // done no scanning (eg, term was precisely
+          // an index term, or, was in the term seek
+          // cache):
+          scratchTerm.copy(b2);
+          setNewSuffixStart(prevTerm, scratchTerm);
+
           return true;
-        } else if (newSuffixStart == seekPrefix && surrogateDidSeekBack[surrogateSeekUpto-1] && t != null && t.field() == fieldInfo.name && t.text().charAt(seekPrefix) > UnicodeUtil.UNI_SUR_LOW_END) {
-          assert pendingPrefix != null;
-          assert pendingPrefix.length > seekPrefix;
-          pendingPrefix[seekPrefix] = 0xffff;
-          Term t2 = protoTerm.createTerm(new BytesRef(pendingPrefix, 0, 1+seekPrefix));
-          if (DEBUG_SURROGATES) {
-            System.out.println("    finish pop; seek fwd to " + UnicodeUtil.toHexString(t2.text()));
-          }
-          getTermsDict().seekEnum(termEnum, t2);
+        } else if (newSuffixStart != 0 || scratchTerm.length != 0) {
           if (DEBUG_SURROGATES) {
-            System.out.println("    found term=" + (termEnum.term() == null ? null : UnicodeUtil.toHexString(termEnum.term().text())));
-          }
-          surrogateSeekUpto--;
-
-          if (termEnum.term() == null || termEnum.term().field() != fieldInfo.name) {
-            // force pop
-            newSuffixStart = -1;
-          } else {
-            newSuffixStart = termEnum.newSuffixStart;
+            System.out.println("      got term=null (or next field)");
           }
-
+          newSuffixStart = 0;
+          scratchTerm.length = 0;
           return true;
         }
       }
@@ -358,117 +482,249 @@ public class PreFlexFields extends Field
       return false;
     }
 
-    private UnicodeUtil.UTF16Result termBuffer = new UnicodeUtil.UTF16Result();
-    private UnicodeUtil.UTF16Result seekBuffer = new UnicodeUtil.UTF16Result();
+    // Pre-flex indices store terms in UTF16 sort order, but
+    // certain queries require Unicode codepoint order; this
+    // method carefully seeks around surrogates to handle
+    // this impedance mismatch
+
+    private void surrogateDance() throws IOException {
+
+      if (!unicodeSortOrder) {
+        return;
+      }
+
+      // We are invoked after TIS.next() (by UTF16 order) to
+      // possibly seek to a different "next" (by unicode
+      // order) term.
+
+      // We scan only the "delta" from the last term to the
+      // current term, in UTF8 bytes.  We look at 1) the bytes
+      // stripped from the prior term, and then 2) the bytes
+      // appended to that prior term's prefix.
     
-    private boolean pushNewSurrogate() throws IOException {
+      // We don't care about specific UTF8 sequences, just
+      // the "category" of the UTF16 character.  Category S
+      // is a high/low surrogate pair (ie non-BMP).
+      // Category E is any BMP char > UNI_SUR_LOW_END (and <
+      // U+FFFF). Category A is the rest (any unicode char
+      // <= UNI_SUR_HIGH_START).
+
+      // The core issue is that pre-flex indices sort the
+      // characters as ASE, while flex must sort as AES.  So
+      // when scanning, when we hit S, we must 1) seek
+      // forward to E and enum the terms there, then 2) seek
+      // back to S and enum all terms there, then 3) seek to
+      // after E.  Three different seek points (1, 2, 3).
+    
+      // We can easily detect S in UTF8: if a byte has
+      // prefix 11110 (0xf0), then that byte and the
+      // following 3 bytes encode a single unicode codepoint
+      // in S.  Similarly, we can detect E: if a byte has
+      // prefix 1110111 (0xee), then that byte and the
+      // following 2 bytes encode a single unicode codepoint
+      // in E.
+
+      // Note that this is really a recursive process --
+      // maybe the char at pos 2 needs to dance, but at any
+      // point in its dance, suddenly pos 4 needs to dance
+      // so you must finish pos 4 before returning to pos
+      // 2.  But then during pos 4's dance maybe pos 7 needs
+      // to dance, etc.  However, despite being recursive,
+      // we don't need to hold any state because the state
+      // can always be derived by looking at prior term &
+      // current term.
+
+      // TODO: can we avoid this copy?
+      if (termEnum.term() == null || termEnum.term().field() != fieldInfo.name) {
+        scratchTerm.length = 0;
+      } else {
+        scratchTerm.copy(termEnum.term().bytes());
+      }
+      
       if (DEBUG_SURROGATES) {
-        System.out.println("  check push newSuffix=" + newSuffixStart + " stack=" + getStack());
+        System.out.println("  dance");
+        System.out.println("    prev=" + UnicodeUtil.toHexString(prevTerm.utf8ToString()));
+        System.out.println("         " + prevTerm.toString());
+        System.out.println("    term=" + UnicodeUtil.toHexString(scratchTerm.utf8ToString()));
+        System.out.println("         " + scratchTerm.toString());
+      }
+
+      // This code assumes TermInfosReader/SegmentTermEnum
+      // always use BytesRef.offset == 0
+      assert prevTerm.offset == 0;
+      assert scratchTerm.offset == 0;
+
+      // Need to loop here because we may need to do multiple
+      // pops, and possibly a continue in the end, ie:
+      //
+      //  cont
+      //  pop, cont
+      //  pop, pop, cont
+      //  <nothing>
+      //
+
+      while(true) {
+        if (doContinue()) {
+          break;
+        } else {
+          if (!doPop()) {
+            break;
+          }
+        }
       }
-      final Term t = termEnum.term();
-      if (t == null || t.field() != fieldInfo.name) {
-        return false;
+
+      if (DEBUG_SURROGATES) {
+        System.out.println("  finish bmp ends");
       }
 
-      final BytesRef bytes = t.bytes();
-      UnicodeUtil.UTF8toUTF16(bytes.bytes, bytes.offset, bytes.length, termBuffer);
+      doPushes();
+    }
 
-      for(int i=Math.max(0,newSuffixStart);i<termBuffer.length;i++) {
-        final char ch = termBuffer.result[i];
-        if (ch >= UnicodeUtil.UNI_SUR_HIGH_START && ch <= UnicodeUtil.UNI_SUR_HIGH_END && (surrogateSeekUpto == 0 || i > surrogateSeekPending[surrogateSeekUpto-1])) {
 
-          if (DEBUG_SURROGATES) {
-            System.out.println("    found high surr 0x" + Integer.toHexString(ch) + " at pos=" + i);
-          }
+    // Look for seek type 1 ("push"): if the newly added
+    // suffix contains any S, we must try to seek to the
+    // corresponding E.  If we find a match, we go there;
+    // else we keep looking for additional S's in the new
+    // suffix.  This "starts" the dance at this character
+    // position:
+    private void doPushes() throws IOException {
+
+      int upTo = newSuffixStart;
+      if (DEBUG_SURROGATES) {
+        System.out.println("  try push newSuffixStart=" + newSuffixStart + " scratchLen=" + scratchTerm.length);
+      }
 
-          // the next() that we just did read in a new
-          // suffix, containing a surrogate pair
+      while(upTo < scratchTerm.length) {
+        if (isNonBMPChar(scratchTerm.bytes, upTo) &&
+            (upTo > newSuffixStart ||
+             (upTo >= prevTerm.length ||
+              (!isNonBMPChar(prevTerm.bytes, upTo) &&
+               !isHighBMPChar(prevTerm.bytes, upTo))))) {
+
+          // A non-BMP char (4 bytes UTF8) starts here:
+          assert scratchTerm.length >= upTo + 4;
+          
+          final int savLength = scratchTerm.length;
+          scratch[0] = scratchTerm.bytes[upTo];
+          scratch[1] = scratchTerm.bytes[upTo+1];
+          scratch[2] = scratchTerm.bytes[upTo+2];
+
+          scratchTerm.bytes[upTo] = UTF8_HIGH_BMP_LEAD;
+          scratchTerm.bytes[upTo+1] = (byte) 0x80;
+          scratchTerm.bytes[upTo+2] = (byte) 0x80;
+          scratchTerm.length = upTo+3;
 
-          // seek forward to see if there are any terms with
-          // this same prefix, but with characters after the
-          // surrogate range; if so, we must first iterate
-          // them, then seek back to the surrogates
+          if (DEBUG_SURROGATES) {
+            System.out.println("    try seek 1 pos=" + upTo + " term=" + UnicodeUtil.toHexString(scratchTerm.utf8ToString()) + " " + scratchTerm.toString() + " len=" + scratchTerm.length);
+          }
 
-          char[] testPrefix = new char[i+2];
-          for(int j=0;j<i;j++) {
-            testPrefix[j] = termBuffer.result[j];
+          // Seek "forward":
+          // TODO: more efficient seek?
+          getTermsDict().seekEnum(seekTermEnum, protoTerm.createTerm(scratchTerm));
+
+          scratchTerm.bytes[upTo] = scratch[0];
+          scratchTerm.bytes[upTo+1] = scratch[1];
+          scratchTerm.bytes[upTo+2] = scratch[2];
+          scratchTerm.length = savLength;
+
+          // Did we find a match?
+          final Term t2 = seekTermEnum.term();
+            
+          if (DEBUG_SURROGATES) {
+            if (t2 == null) {
+              System.out.println("      hit term=null");
+            } else {
+              System.out.println("      hit term=" + UnicodeUtil.toHexString(t2.text()) + " " + (t2==null? null:t2.bytes()));
+            }
           }
-          testPrefix[i] = 1+UnicodeUtil.UNI_SUR_LOW_END;
 
-          getTermsDict().seekEnum(seekTermEnum, protoTerm.createTerm(new BytesRef(testPrefix, 0, i+1)));
+          // Since this was a seek "forward", we could hit
+          // EOF or a different field:
+          boolean matches;
 
-          Term t2 = seekTermEnum.term();
-          boolean isPrefix;
           if (t2 != null && t2.field() == fieldInfo.name) {
-
-            final BytesRef seekBytes = t2.bytes();
-            UnicodeUtil.UTF8toUTF16(seekBytes.bytes, seekBytes.offset, seekBytes.length, seekBuffer);
-
-            isPrefix = true;
-            if (DEBUG_SURROGATES) {
-              System.out.println("      seek found " + UnicodeUtil.toHexString(t2.text()));
-            }
-            for(int j=0;j<i;j++) {
-              if (testPrefix[j] != seekBuffer.result[j]) {
-                isPrefix = false;
-                break;
-              }
-            }
-            if (DEBUG_SURROGATES && !isPrefix) {
-              System.out.println("      no end terms");
+            final BytesRef b2 = t2.bytes();
+            assert b2.offset == 0;
+            if (b2.length >= upTo+3 && isHighBMPChar(b2.bytes, upTo)) {
+              matches = true;
+              for(int i=0;i<upTo;i++) {
+                if (scratchTerm.bytes[i] != b2.bytes[i]) {
+                  matches = false;
+                  break;
+                }
+              }              
+                
+            } else {
+              matches = false;
             }
           } else {
-            if (DEBUG_SURROGATES) {
-              System.out.println("      no end terms");
-            }
-            isPrefix = false;
+            matches = false;
           }
 
-          if (isPrefix) {
-            // we found a term, sharing the same prefix,
-            // with characters after the surrogates, so we
-            // must first enum those, and then return the
-            // the surrogates afterwards.  push that pending
-            // seek on the surrogates stack now:
-            pendingPrefix = testPrefix;
-
-            getTermsDict().seekEnum(termEnum, t2);
-
-            if (surrogateSeekUpto == surrogateSeekPending.length) {
-              surrogateSeekPending = ArrayUtil.grow(surrogateSeekPending);
-            }
-            if (surrogateSeekUpto == surrogateDidSeekBack.length) {
-              surrogateDidSeekBack = ArrayUtil.grow(surrogateDidSeekBack);
-            }
-            surrogateSeekPending[surrogateSeekUpto] = i;
-            surrogateDidSeekBack[surrogateSeekUpto] = false;
-            surrogateSeekUpto++;
+          if (matches) {
 
             if (DEBUG_SURROGATES) {
-              System.out.println("      do push " + i + "; end term=" + UnicodeUtil.toHexString(t2.text()));
+              System.out.println("      matches!");
             }
 
-            newSuffixStart = i+1;
-
-            return true;
+            // OK seek "back"
+            // TODO: more efficient seek?
+            getTermsDict().seekEnum(termEnum, seekTermEnum.term());
+
+            scratchTerm.copy(seekTermEnum.term().bytes());
+
+            // +3 because we don't need to check the char
+            // at upTo: we know it's > BMP
+            upTo += 3;
+
+            // NOTE: we keep iterating, now, since this
+            // can easily "recurse".  Ie, after seeking
+            // forward at a certain char position, we may
+            // find another surrogate in our [new] suffix
+            // and must then do another seek (recurse)
           } else {
-            // there are no terms after the surrogates, so
-            // we do nothing to the enum and just step
-            // through the surrogates like normal.  but we
-            // must keep iterating through the term, in case
-            // another surrogate pair appears later
+            upTo++;
           }
+        } else {
+          upTo++;
         }
       }
+    }
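A note on the three bytes swapped in above: 0xEE 0x80 0x80 is the UTF-8 encoding of U+E000, the smallest category-E character, so the temporary seek target is the current prefix followed by U+E000 -- the smallest possible term whose codepoint at position upTo lies in E. A quick standalone check (plain Java, not part of this patch):

    public class ShowE000 {
      public static void main(String[] args) throws Exception {
        // UTF-8 bytes of U+E000, the first BMP char after the surrogate range:
        byte[] b = new String(Character.toChars(0xE000)).getBytes("UTF-8");
        // prints "ee 80 80"
        System.out.printf("%02x %02x %02x%n", b[0] & 0xff, b[1] & 0xff, b[2] & 0xff);
      }
    }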
 
-      return false;
+    private boolean unicodeSortOrder;
+
+    void reset(FieldInfo fieldInfo) throws IOException {
+      //System.out.println("pff.reset te=" + termEnum);
+      this.fieldInfo = fieldInfo;
+      protoTerm = new Term(fieldInfo.name);
+      if (termEnum == null) {
+        termEnum = getTermsDict().terms(protoTerm);
+        seekTermEnum = getTermsDict().terms(protoTerm);
+        //System.out.println("  term=" + termEnum.term());
+      } else {
+        getTermsDict().seekEnum(termEnum, protoTerm);
+      }
+      skipNext = true;
+
+      unicodeSortOrder = sortTermsByUnicode();
+
+      final Term t = termEnum.term();
+      if (t != null && t.field() == fieldInfo.name) {
+        newSuffixStart = 0;
+        prevTerm.length = 0;
+        surrogateDance();
+      }
     }
 
     @Override
     public Comparator<BytesRef> getComparator() {
       // Pre-flex indexes always sorted in UTF16 order, but
       // we remap on-the-fly to unicode order
-      return BytesRef.getUTF8SortedAsUnicodeComparator();
+      if (unicodeSortOrder) {
+        return BytesRef.getUTF8SortedAsUnicodeComparator();
+      } else {
+        return BytesRef.getUTF8SortedAsUTF16Comparator();
+      }
     }
 
     @Override
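The isNonBMPChar/isHighBMPChar helpers used throughout the dance are defined elsewhere in PreFlexFields.java and are not shown in this hunk; a minimal sketch of what they test, assuming they follow the lead-byte prefixes described in the comments above (11110 for category S, 1110111 for category E), would be:

    // Sketch only -- the committed helpers may use different masks.
    // Category S: a 4-byte UTF-8 sequence (non-BMP codepoint) starts at idx.
    static boolean isNonBMPChar(byte[] b, int idx) {
      return (b[idx] & 0xf8) == 0xf0;      // lead byte 11110xxx
    }

    // Category E: a 3-byte UTF-8 sequence for U+E000..U+FFFF starts at idx.
    static boolean isHighBMPChar(byte[] b, int idx) {
      return (b[idx] & 0xfe) == 0xee;      // lead byte 1110111x (0xEE or 0xEF)
    }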
@@ -484,7 +740,7 @@ public class PreFlexFields extends Field
     @Override
     public SeekStatus seek(BytesRef term, boolean useCache) throws IOException {
       if (DEBUG_SURROGATES) {
-        System.out.println("TE.seek() term=" + term.utf8ToString());
+        System.out.println("TE.seek target=" + UnicodeUtil.toHexString(term.utf8ToString()));
       }
       skipNext = false;
       final TermInfosReader tis = getTermsDict();
@@ -492,50 +748,142 @@ public class PreFlexFields extends Field
 
       assert termEnum != null;
 
-      if (termEnum == null) {
-        termEnum = tis.terms(t0);
-      } else {
-        tis.seekEnum(termEnum, t0);
-      }
-
-      surrogateSeekUpto = 0;
-      surrogatesDance();
+      tis.seekEnum(termEnum, t0);
 
       final Term t = termEnum.term();
 
-      final BytesRef tr = t == null ? null : t.bytes();
-
-      if (t != null && t.field() == fieldInfo.name && term.bytesEquals(tr)) {
-        current = tr;
+      if (t != null && t.field() == fieldInfo.name && term.bytesEquals(t.bytes())) {
+        // If we found an exact match, no need to do the
+        // surrogate dance
+        if (DEBUG_SURROGATES) {
+          System.out.println("  seek exact match");
+        }
+        current = t.bytes();
         return SeekStatus.FOUND;
       } else if (t == null || t.field() != fieldInfo.name) {
+
+        // TODO: maybe we can handle this like the next()
+        // into null?  set term as prevTerm then dance?
+
+        if (DEBUG_SURROGATES) {
+          System.out.println("  seek hit EOF");
+        }
+
+        // We hit EOF; try end-case surrogate dance: if we
+        // find an E, try swapping in S, backwards:
+        scratchTerm.copy(term);
+
+        assert scratchTerm.offset == 0;
+
+        for(int i=scratchTerm.length-1;i>=0;i--) {
+          if (isHighBMPChar(scratchTerm.bytes, i)) {
+            if (DEBUG_SURROGATES) {
+              System.out.println("    found E pos=" + i + "; try seek");
+            }
+
+            if (seekToNonBMP(seekTermEnum, scratchTerm, i)) {
+
+              scratchTerm.copy(seekTermEnum.term().bytes());
+              getTermsDict().seekEnum(termEnum, seekTermEnum.term());
+
+              newSuffixStart = 1+i;
+
+              doPushes();
+
+              // Found a match
+              // TODO: faster seek?
+              current = termEnum.term().bytes();
+              return SeekStatus.NOT_FOUND;
+            }
+          }
+        }
+        
+        if (DEBUG_SURROGATES) {
+          System.out.println("  seek END");
+        }
+
         current = null;
         return SeekStatus.END;
       } else {
-        current = tr;
-        return SeekStatus.NOT_FOUND;
+
+        // We found a non-exact but non-null term; this one
+        // is fun -- just treat it like next, by pretending
+        // the requested term was prev:
+        prevTerm.copy(term);
+
+        if (DEBUG_SURROGATES) {
+          System.out.println("  seek hit non-exact term=" + UnicodeUtil.toHexString(t.text()));
+        }
+
+        final BytesRef br = t.bytes();
+        assert br.offset == 0;
+
+        setNewSuffixStart(term, br);
+
+        surrogateDance();
+
+        final Term t2 = termEnum.term();
+        if (t2 == null || t2.field() != fieldInfo.name) {
+          assert t2 == null || !t2.field().equals(fieldInfo.name); // make sure fields are in fact interned
+          current = null;
+          return SeekStatus.END;
+        } else {
+          current = t2.bytes();
+          assert !unicodeSortOrder || term.compareTo(current) < 0 : "term=" + UnicodeUtil.toHexString(term.utf8ToString()) + " vs current=" + UnicodeUtil.toHexString(current.utf8ToString());
+          return SeekStatus.NOT_FOUND;
+        }
+      }
+    }
+
+    private void setNewSuffixStart(BytesRef br1, BytesRef br2) {
+      final int limit = Math.min(br1.length, br2.length);
+      int lastStart = 0;
+      for(int i=0;i<limit;i++) {
+        if ((br1.bytes[br1.offset+i] & 0xc0) == 0xc0 || (br1.bytes[br1.offset+i] & 0x80) == 0) {
+          lastStart = i;
+        }
+        if (br1.bytes[br1.offset+i] != br2.bytes[br2.offset+i]) {
+          newSuffixStart = lastStart;
+          if (DEBUG_SURROGATES) {
+            System.out.println("    set newSuffixStart=" + newSuffixStart);
+          }
+          return;
+        }
+      }
+      newSuffixStart = limit;
+      if (DEBUG_SURROGATES) {
+        System.out.println("    set newSuffixStart=" + newSuffixStart);
       }
     }
 
     @Override
     public BytesRef next() throws IOException {
       if (DEBUG_SURROGATES) {
-        System.out.println("TE.next() skipNext=" + skipNext);
+        System.out.println("TE.next()");
       }
       if (skipNext) {
+        if (DEBUG_SURROGATES) {
+          System.out.println("  skipNext=true");
+        }
         skipNext = false;
         if (termEnum.term() == null) {
           return null;
+        } else if (termEnum.term().field() != fieldInfo.name) {
+          return null;
         } else {
           return current = termEnum.term().bytes();
         }
       }
+
+      // TODO: can we use STE's prevBuffer here?
+      prevTerm.copy(termEnum.term().bytes());
+
       if (termEnum.next() && termEnum.term().field() == fieldInfo.name) {
         newSuffixStart = termEnum.newSuffixStart;
         if (DEBUG_SURROGATES) {
-          System.out.println("  set newSuffixStart=" + newSuffixStart);
+          System.out.println("  newSuffixStart=" + newSuffixStart);
         }
-        surrogatesDance();
+        surrogateDance();
         final Term t = termEnum.term();
         if (t == null || t.field() != fieldInfo.name) {
           assert t == null || !t.field().equals(fieldInfo.name); // make sure fields are in fact interned
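In setNewSuffixStart above, the byte test (b & 0xc0) == 0xc0 || (b & 0x80) == 0 simply asks whether a UTF-8 codepoint starts at that byte, so lastStart always snaps back to the beginning of the codepoint containing the first mismatching byte and newSuffixStart never lands in the middle of a multi-byte sequence. Spelled out as a sketch (helper name hypothetical, not from the patch):

    // True if b is the first byte of a UTF-8 sequence: an ASCII byte
    // (0xxxxxxx) or a multi-byte lead byte (11xxxxxx); false for
    // continuation bytes (10xxxxxx).
    static boolean isCodepointStart(byte b) {
      return (b & 0x80) == 0 || (b & 0xc0) == 0xc0;
    }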
@@ -545,12 +893,15 @@ public class PreFlexFields extends Field
         }
         return current;
       } else {
+        // This field is exhausted, but we have to give
+        // surrogateDance a chance to seek back:
         if (DEBUG_SURROGATES) {
-          System.out.println("  force pop");
+          System.out.println("  force cont");
         }
-        // force pop
-        newSuffixStart = -1;
-        surrogatesDance();
+        //newSuffixStart = prevTerm.length;
+        newSuffixStart = 0;
+        surrogateDance();
+        
         final Term t = termEnum.term();
         if (t == null || t.field() != fieldInfo.name) {
           assert t == null || !t.field().equals(fieldInfo.name); // make sure fields are in fact interned
@@ -574,20 +925,32 @@ public class PreFlexFields extends Field
 
     @Override
     public DocsEnum docs(Bits skipDocs, DocsEnum reuse) throws IOException {
-      if (reuse != null) {
-        return ((PreDocsEnum) reuse).reset(termEnum, skipDocs);        
+      PreDocsEnum docsEnum;
+      if (reuse == null || !(reuse instanceof PreDocsEnum)) {
+        docsEnum = new PreDocsEnum();
       } else {
-        return (new PreDocsEnum()).reset(termEnum, skipDocs);
+        docsEnum = (PreDocsEnum) reuse;
+        if (docsEnum.getFreqStream() != freqStream) {
+          docsEnum = new PreDocsEnum();
+        }
       }
+      return docsEnum.reset(termEnum, skipDocs);
     }
 
     @Override
     public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, DocsAndPositionsEnum reuse) throws IOException {
-      if (reuse != null) {
-        return ((PreDocsAndPositionsEnum) reuse).reset(termEnum, skipDocs);        
+      PreDocsAndPositionsEnum docsPosEnum;
+      if (fieldInfo.omitTermFreqAndPositions) {
+        return null;
+      } else if (reuse == null || !(reuse instanceof PreDocsAndPositionsEnum)) {
+        docsPosEnum = new PreDocsAndPositionsEnum();
       } else {
-        return (new PreDocsAndPositionsEnum()).reset(termEnum, skipDocs);
+        docsPosEnum = (PreDocsAndPositionsEnum) reuse;
+        if (docsPosEnum.getFreqStream() != freqStream) {
+          docsPosEnum = new PreDocsAndPositionsEnum();
+        }
       }
+      return docsPosEnum.reset(termEnum, skipDocs);        
     }
   }
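The reuse guards above only recycle the caller-supplied enum when it is the expected class and wraps the same freqStream; anything else gets a fresh instance, and docsAndPositions() now returns null when the field omits term freq/positions. A hypothetical caller sketch (termsEnum and skipDocs are assumed to exist; they are not taken from this patch):

    DocsEnum docsEnum = null;
    BytesRef term;
    while ((term = termsEnum.next()) != null) {
      // Pass the previous enum back in; docs() silently allocates a new
      // PreDocsEnum if the reuse candidate is unusable.
      docsEnum = termsEnum.docs(skipDocs, docsEnum);
      int doc;
      while ((doc = docsEnum.nextDoc()) != DocsEnum.NO_MORE_DOCS) {
        // consume doc and docsEnum.freq() here
      }
    }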
 
@@ -598,6 +961,10 @@ public class PreFlexFields extends Field
       docs = new SegmentTermDocs(freqStream, getTermsDict(), fieldInfos);
     }
 
+    IndexInput getFreqStream() {
+      return freqStream;
+    }
+
     public PreDocsEnum reset(SegmentTermEnum termEnum, Bits skipDocs) throws IOException {
       docs.setSkipDocs(skipDocs);
       docs.seek(termEnum);
@@ -650,6 +1017,10 @@ public class PreFlexFields extends Field
       pos = new SegmentTermPositions(freqStream, proxStream, getTermsDict(), fieldInfos);
     }
 
+    IndexInput getFreqStream() {
+      return freqStream;
+    }
+
     public DocsAndPositionsEnum reset(SegmentTermEnum termEnum, Bits skipDocs) throws IOException {
       pos.setSkipDocs(skipDocs);
       pos.seek(termEnum);

Modified: lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/preflex/SegmentTermEnum.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/preflex/SegmentTermEnum.java?rev=979453&r1=979452&r2=979453&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/preflex/SegmentTermEnum.java (original)
+++ lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/preflex/SegmentTermEnum.java Mon Jul 26 21:35:43 2010
@@ -84,26 +84,16 @@ public final class SegmentTermEnum imple
       format = firstInt;
 
       // check that it is a format we can understand
-    if (format > FORMAT_MINIMUM)
-      throw new IndexFormatTooOldException(null, format, FORMAT_MINIMUM, FORMAT_CURRENT);
-    if (format < FORMAT_CURRENT)
-      throw new IndexFormatTooNewException(null, format, FORMAT_MINIMUM, FORMAT_CURRENT);
+      if (format > FORMAT_MINIMUM)
+        throw new IndexFormatTooOldException(null, format, FORMAT_MINIMUM, FORMAT_CURRENT);
+      if (format < FORMAT_CURRENT)
+        throw new IndexFormatTooNewException(null, format, FORMAT_MINIMUM, FORMAT_CURRENT);
 
       size = input.readLong();                    // read the size
       
-      if(format == -1){
-        if (!isIndex) {
-          indexInterval = input.readInt();
-          formatM1SkipInterval = input.readInt();
-        }
-        // switch off skipTo optimization for file format prior to 1.4rc2 in order to avoid a bug in 
-        // skipTo implementation of these versions
-        skipInterval = Integer.MAX_VALUE;
-      } else {
-        indexInterval = input.readInt();
-        skipInterval = input.readInt();
-        maxSkipLevels = input.readInt();
-      }
+      indexInterval = input.readInt();
+      skipInterval = input.readInt();
+      maxSkipLevels = input.readInt();
       assert indexInterval > 0: "indexInterval=" + indexInterval + " is negative; must be > 0";
       assert skipInterval > 0: "skipInterval=" + skipInterval + " is negative; must be > 0";
     }
@@ -132,18 +122,21 @@ public final class SegmentTermEnum imple
     position = p;
     termBuffer.set(t);
     prevBuffer.reset();
+    //System.out.println("  ste doSeek prev=" + prevBuffer.toTerm() + " this=" + this);
     termInfo.set(ti);
   }
 
   /** Increments the enumeration to the next element.  True if one exists.*/
   public final boolean next() throws IOException {
+    prevBuffer.set(termBuffer);
+    //System.out.println("  ste setPrev=" + prev() + " this=" + this);
+
     if (position++ >= size - 1) {
-      prevBuffer.set(termBuffer);
       termBuffer.reset();
+      //System.out.println("    EOF");
       return false;
     }
 
-    prevBuffer.set(termBuffer);
     termBuffer.read(input, fieldInfos);
     newSuffixStart = termBuffer.newSuffixStart;
 
@@ -168,6 +161,7 @@ public final class SegmentTermEnum imple
     if (isIndex)
       indexPointer += input.readVLong();	  // read index pointer
 
+    //System.out.println("  ste ret term=" + term());
     return true;
   }
 

Modified: lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/preflex/TermBuffer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/preflex/TermBuffer.java?rev=979453&r1=979452&r2=979453&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/preflex/TermBuffer.java (original)
+++ lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/preflex/TermBuffer.java Mon Jul 26 21:35:43 2010
@@ -18,9 +18,10 @@ package org.apache.lucene.index.codecs.p
  */
 
 import java.io.IOException;
+import java.util.Comparator;
+
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.UnicodeUtil;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.FieldInfos;
 
@@ -28,102 +29,65 @@ final class TermBuffer implements Clonea
 
   private String field;
   private Term term;                            // cached
-  private boolean dirty;                          // true if text was set externally (ie not read via UTF8 bytes)
 
-  private UnicodeUtil.UTF16Result text = new UnicodeUtil.UTF16Result();
   private BytesRef bytes = new BytesRef(10);
 
-  int newSuffixStart;
+  private static final Comparator<BytesRef> utf8AsUTF16Comparator = BytesRef.getUTF8SortedAsUTF16Comparator();
+
+  int newSuffixStart;                             // only valid right after .read is called
 
-  public final int compareTo(TermBuffer other) {
+  public int compareTo(TermBuffer other) {
     if (field == other.field) 	  // fields are interned
-      return compareChars(text.result, text.length, other.text.result, other.text.length);
+      return utf8AsUTF16Comparator.compare(bytes, other.bytes);
     else
       return field.compareTo(other.field);
   }
 
-  private static int compareChars(char[] chars1, int len1,
-                                  char[] chars2, int len2) {
-    final int end = len1 < len2 ? len1:len2;
-    for (int k = 0; k < end; k++) {
-      char c1 = chars1[k];
-      char c2 = chars2[k];
-      if (c1 != c2) {
-        return c1 - c2;
-      }
-    }
-    return len1 - len2;
-  }
-
-  public final void read(IndexInput input, FieldInfos fieldInfos)
+  public void read(IndexInput input, FieldInfos fieldInfos)
     throws IOException {
     this.term = null;                           // invalidate cache
-    int start = input.readVInt();
+    newSuffixStart = input.readVInt();
     int length = input.readVInt();
-    int totalLength = start + length;
+    int totalLength = newSuffixStart + length;
     if (bytes.bytes.length < totalLength) {
       bytes.grow(totalLength);
     }
-    if (dirty) {
-      // Fully convert all bytes since bytes is dirty
-      UnicodeUtil.UTF16toUTF8(text.result, 0, text.length, bytes);
-      bytes.length = totalLength;
-      input.readBytes(bytes.bytes, start, length);
-      UnicodeUtil.UTF8toUTF16(bytes.bytes, 0, totalLength, text);
-      dirty = false;
-    } else {
-      // Incrementally convert only the UTF8 bytes that are new:
-      bytes.length = totalLength;
-      input.readBytes(bytes.bytes, start, length);
-      UnicodeUtil.UTF8toUTF16(bytes.bytes, start, length, text);
-    }
-
-    while(true) {
-      newSuffixStart = text.offsets[start];
-      if (newSuffixStart != -1) {
-        break;
-      }
-      if (--start == 0) {
-        newSuffixStart = 0;
-        break;
-      }
-    }
+    bytes.length = totalLength;
+    input.readBytes(bytes.bytes, newSuffixStart, length);
     this.field = fieldInfos.fieldName(input.readVInt());
   }
 
-  public final void set(Term term) {
+  public void set(Term term) {
     if (term == null) {
       reset();
       return;
     }
-    
-    final BytesRef termBytes = term.bytes();
-    UnicodeUtil.UTF8toUTF16(termBytes.bytes, termBytes.offset, termBytes.length, text);
-    dirty = true;
+    bytes.copy(term.bytes());
     field = term.field();
     this.term = term;
   }
 
-  public final void set(TermBuffer other) {
-    text.copyText(other.text);
-    dirty = true;
+  public void set(TermBuffer other) {
     field = other.field;
-    term = other.term;
+    // dangerous to copy Term over, since the underlying
+    // BytesRef could subsequently be modified:
+    term = null;
+    bytes.copy(other.bytes);
   }
 
   public void reset() {
     field = null;
-    text.setLength(0);
     term = null;
-    dirty = true;
   }
 
   public Term toTerm() {
     if (field == null)                            // unset
       return null;
 
-    if (term == null)
-      term = new Term(field, new BytesRef(text.result, 0, text.length), false);
+    if (term == null) {
+      term = new Term(field, new BytesRef(bytes), false);
+      //term = new Term(field, bytes, false);
+    }
 
     return term;
   }
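TermBuffer.compareTo now delegates to BytesRef.getUTF8SortedAsUTF16Comparator(), i.e. it keeps comparing terms in UTF-16 code-unit order even though only UTF-8 bytes are stored. The two orders in play only disagree when one term contains a non-BMP codepoint (category S) and the other a char in U+E000..U+FFFF (category E); a small illustration, assuming BytesRef's CharSequence constructor (plain Java, not part of this patch):

    import java.util.Comparator;
    import org.apache.lucene.util.BytesRef;

    public class SortOrderDemo {
      public static void main(String[] args) {
        BytesRef nonBmp  = new BytesRef(new String(Character.toChars(0x10400))); // category S
        BytesRef highBmp = new BytesRef("\uE000");                               // category E

        Comparator<BytesRef> utf16   = BytesRef.getUTF8SortedAsUTF16Comparator();
        Comparator<BytesRef> unicode = BytesRef.getUTF8SortedAsUnicodeComparator();

        // UTF-16 order: the surrogate pair D801 DC00 sorts before U+E000
        System.out.println(utf16.compare(nonBmp, highBmp) < 0);   // true
        // Unicode/UTF-8 byte order: U+10400 (lead 0xF0) sorts after U+E000 (lead 0xEE)
        System.out.println(unicode.compare(nonBmp, highBmp) > 0); // true
      }
    }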
@@ -134,12 +98,7 @@ final class TermBuffer implements Clonea
     try {
       clone = (TermBuffer)super.clone();
     } catch (CloneNotSupportedException e) {}
-    clone.dirty = true;
-    clone.bytes = new BytesRef(10);
-    clone.text = new UnicodeUtil.UTF16Result();
-    clone.text.offsets = new int[text.offsets.length];
-    System.arraycopy(text.offsets, 0, clone.text.offsets, 0, text.offsets.length);
-    clone.text.copyText(text);
+    clone.bytes = new BytesRef(bytes);
     return clone;
   }
 }

Modified: lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/preflex/TermInfo.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/preflex/TermInfo.java?rev=979453&r1=979452&r2=979453&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/preflex/TermInfo.java (original)
+++ lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/preflex/TermInfo.java Mon Jul 26 21:35:43 2010
@@ -23,30 +23,30 @@ package org.apache.lucene.index.codecs.p
  * indexing. */
 
 @Deprecated
-class TermInfo {
+public class TermInfo {
   /** The number of documents which contain the term. */
-  int docFreq = 0;
+  public int docFreq = 0;
 
-  long freqPointer = 0;
-  long proxPointer = 0;
-  int skipOffset;
+  public long freqPointer = 0;
+  public long proxPointer = 0;
+  public int skipOffset;
 
-  TermInfo() {}
+  public TermInfo() {}
 
-  TermInfo(int df, long fp, long pp) {
+  public TermInfo(int df, long fp, long pp) {
     docFreq = df;
     freqPointer = fp;
     proxPointer = pp;
   }
 
-  TermInfo(TermInfo ti) {
+  public TermInfo(TermInfo ti) {
     docFreq = ti.docFreq;
     freqPointer = ti.freqPointer;
     proxPointer = ti.proxPointer;
     skipOffset = ti.skipOffset;
   }
 
-  final void set(int docFreq,
+  public final void set(int docFreq,
                  long freqPointer, long proxPointer, int skipOffset) {
     this.docFreq = docFreq;
     this.freqPointer = freqPointer;
@@ -54,7 +54,7 @@ class TermInfo {
     this.skipOffset = skipOffset;
   }
 
-  final void set(TermInfo ti) {
+  public final void set(TermInfo ti) {
     docFreq = ti.docFreq;
     freqPointer = ti.freqPointer;
     proxPointer = ti.proxPointer;

Modified: lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/preflex/TermInfosReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/preflex/TermInfosReader.java?rev=979453&r1=979452&r2=979453&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/preflex/TermInfosReader.java (original)
+++ lucene/dev/trunk/lucene/src/java/org/apache/lucene/index/codecs/preflex/TermInfosReader.java Mon Jul 26 21:35:43 2010
@@ -119,9 +119,12 @@ public final class TermInfosReader {
           indexTerms = new Term[indexSize];
           indexInfos = new TermInfo[indexSize];
           indexPointers = new long[indexSize];
-        
-          for (int i = 0; indexEnum.next(); i++) {
+
+          for (int i=0;indexEnum.next(); i++) {
             indexTerms[i] = indexEnum.term();
+            assert indexTerms[i] != null;
+            assert indexTerms[i].text() != null;
+            assert indexTerms[i].field() != null;
             indexInfos[i] = indexEnum.termInfo();
             indexPointers[i] = indexEnum.indexPointer;
         
@@ -160,14 +163,14 @@ public final class TermInfosReader {
     return origEnum.maxSkipLevels;
   }
 
-  final void close() throws IOException {
+  void close() throws IOException {
     if (origEnum != null)
       origEnum.close();
     threadResources.close();
   }
 
   /** Returns the number of term/value pairs in the set. */
-  final long size() {
+  long size() {
     return size;
   }
 
@@ -183,12 +186,13 @@ public final class TermInfosReader {
 
 
   /** Returns the offset of the greatest index entry which is less than or equal to term.*/
-  private final int getIndexOffset(Term term) {
+  private int getIndexOffset(Term term) {
     int lo = 0;					  // binary search indexTerms[]
     int hi = indexTerms.length - 1;
 
     while (hi >= lo) {
       int mid = (lo + hi) >>> 1;
+      assert indexTerms[mid] != null : "indexTerms = " + indexTerms.length + " mid=" + mid;
       int delta = term.compareToUTF16(indexTerms[mid]);
       if (delta < 0)
 	hi = mid - 1;
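getIndexOffset, partially shown in this hunk, is the standard "greatest entry <= key" binary search (it falls through to return hi). The same shape over a plain int[], for reference (helper name hypothetical):

    // Returns the index of the greatest element <= key, or -1 if every
    // element is greater -- mirroring getIndexOffset's return of hi.
    static int floorIndex(int[] sorted, int key) {
      int lo = 0, hi = sorted.length - 1;
      while (hi >= lo) {
        final int mid = (lo + hi) >>> 1;   // unsigned shift avoids (lo + hi) overflow
        if (key < sorted[mid]) {
          hi = mid - 1;
        } else if (key > sorted[mid]) {
          lo = mid + 1;
        } else {
          return mid;
        }
      }
      return hi;
    }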
@@ -200,7 +204,7 @@ public final class TermInfosReader {
     return hi;
   }
 
-  private final void seekEnum(SegmentTermEnum enumerator, int indexOffset) throws IOException {
+  private void seekEnum(SegmentTermEnum enumerator, int indexOffset) throws IOException {
     enumerator.seek(indexPointers[indexOffset],
                     ((long) indexOffset * totalIndexInterval) - 1,
                     indexTerms[indexOffset], indexInfos[indexOffset]);
@@ -231,6 +235,9 @@ public final class TermInfosReader {
   }
 
   TermInfo seekEnum(SegmentTermEnum enumerator, Term term, TermInfoAndOrd tiOrd) throws IOException {
+    if (size == 0) {
+      return null;
+    }
 
     // optimize sequential access: first try scanning cached enum w/o seeking
     if (enumerator.term() != null                 // term is at or past current
@@ -242,7 +249,6 @@ public final class TermInfosReader {
        // no need to seek
 
         final TermInfo ti;
-
         int numScans = enumerator.scanTo(term);
         if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
           ti = enumerator.termInfo();
@@ -279,6 +285,7 @@ public final class TermInfosReader {
     seekEnum(enumerator, indexPos);
     enumerator.scanTo(term);
     final TermInfo ti;
+
     if (enumerator.term() != null && term.compareToUTF16(enumerator.term()) == 0) {
       ti = enumerator.termInfo();
       if (tiOrd == null) {
@@ -294,7 +301,7 @@ public final class TermInfosReader {
   }
 
   // called only from asserts
-  private final boolean sameTermInfo(TermInfo ti1, TermInfo ti2, SegmentTermEnum enumerator) {
+  private boolean sameTermInfo(TermInfo ti1, TermInfo ti2, SegmentTermEnum enumerator) {
     if (ti1.docFreq != ti2.docFreq) {
       return false;
     }
@@ -319,7 +326,7 @@ public final class TermInfosReader {
   }
 
   /** Returns the position of a Term in the set or -1. */
-  final long getPosition(Term term) throws IOException {
+  long getPosition(Term term) throws IOException {
     if (size == 0) return -1;
 
     ensureIndexIsRead();

Propchange: lucene/dev/trunk/lucene/src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Mon Jul 26 21:35:43 2010
@@ -1,4 +1,5 @@
 /lucene/dev/branches/branch_3x/lucene/src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java:943137,949730,957490,960490,961612,979161
+/lucene/dev/branches/preflexfixes/lucene/src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java:967125-979432
 /lucene/java/branches/flex_1458/src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java:824912-931101
 /lucene/java/branches/lucene_2_9/src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java:909334,948516
 /lucene/java/trunk/src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java:924483-924731,924781,925176-925462