Posted to commits@lucene.apache.org by ma...@apache.org on 2011/10/10 21:58:32 UTC

svn commit: r1181188 [6/8] - in /lucene/dev/branches/solrcloud: ./ dev-tools/eclipse/ dev-tools/idea/.idea/ dev-tools/idea/lucene/contrib/ dev-tools/idea/modules/grouping/ dev-tools/idea/solr/ dev-tools/idea/solr/contrib/analysis-extras/ dev-tools/idea...

Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/values/TestDocValuesIndexing.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/values/TestDocValuesIndexing.java?rev=1181188&r1=1181187&r2=1181188&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/values/TestDocValuesIndexing.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/index/values/TestDocValuesIndexing.java Mon Oct 10 19:58:24 2011
@@ -47,8 +47,6 @@ import org.apache.lucene.store.Directory
 import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.FixedBitSet;
-import org.apache.lucene.util.FloatsRef;
-import org.apache.lucene.util.LongsRef;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util._TestUtil;
 import org.junit.Before;
@@ -136,7 +134,6 @@ public class TestDocValuesIndexing exten
     Collections.shuffle(values, random);
     ValueType first = values.get(0);
     ValueType second = values.get(1);
-    String msg = "[first=" + first.name() + ", second=" + second.name() + "]";
     // index first index
     Directory d_1 = newDirectory();
     IndexWriter w_1 = new IndexWriter(d_1, writerConfig(random.nextBoolean()));
@@ -171,36 +168,66 @@ public class TestDocValuesIndexing exten
     // check values
     
     IndexReader merged = IndexReader.open(w, true);
-    ValuesEnum vE_1 = getValuesEnum(getDocValues(r_1, first.name()));
-    ValuesEnum vE_2 = getValuesEnum(getDocValues(r_2, second.name()));
-    ValuesEnum vE_1_merged = getValuesEnum(getDocValues(merged, first.name()));
-    ValuesEnum vE_2_merged = getValuesEnum(getDocValues(merged, second
+    Source source_1 = getSource(getDocValues(r_1, first.name()));
+    Source source_2 = getSource(getDocValues(r_2, second.name()));
+    Source source_1_merged = getSource(getDocValues(merged, first.name()));
+    Source source_2_merged = getSource(getDocValues(merged, second
         .name()));
-    switch (second) { // these variants don't advance over missing values
-    case BYTES_FIXED_STRAIGHT:
-    case BYTES_VAR_STRAIGHT:
-    case FLOAT_32:
-    case FLOAT_64:
-    case VAR_INTS:
-    case FIXED_INTS_16:
-    case FIXED_INTS_32:
-    case FIXED_INTS_64:
-    case FIXED_INTS_8:
-      assertEquals(msg, valuesPerIndex-1, vE_2_merged.advance(valuesPerIndex-1));
+    for (int i = 0; i < r_1.maxDoc(); i++) {
+      switch (first) {
+      case BYTES_FIXED_DEREF:
+      case BYTES_FIXED_STRAIGHT:
+      case BYTES_VAR_DEREF:
+      case BYTES_VAR_STRAIGHT:
+      case BYTES_FIXED_SORTED:
+      case BYTES_VAR_SORTED:
+        assertEquals(source_1.getBytes(i, new BytesRef()),
+            source_1_merged.getBytes(i, new BytesRef()));
+        break;
+      case FIXED_INTS_16:
+      case FIXED_INTS_32:
+      case FIXED_INTS_64:
+      case FIXED_INTS_8:
+      case VAR_INTS:
+        assertEquals(source_1.getInt(i), source_1_merged.getInt(i));
+        break;
+      case FLOAT_32:
+      case FLOAT_64:
+        assertEquals(source_1.getFloat(i), source_1_merged.getFloat(i), 0.0d);
+        break;
+      default:
+        fail("unkonwn " + first);
+      }
     }
-    
-    for (int i = 0; i < valuesPerIndex; i++) {
-      assertEquals(msg, i, vE_1.nextDoc());
-      assertEquals(msg, i, vE_1_merged.nextDoc());
-
-      assertEquals(msg, i, vE_2.nextDoc());
-      assertEquals(msg, i + valuesPerIndex, vE_2_merged.nextDoc());
-    }
-    assertEquals(msg, ValuesEnum.NO_MORE_DOCS, vE_1.nextDoc());
-    assertEquals(msg, ValuesEnum.NO_MORE_DOCS, vE_2.nextDoc());
-    assertEquals(msg, ValuesEnum.NO_MORE_DOCS, vE_1_merged.advance(valuesPerIndex*2));
-    assertEquals(msg, ValuesEnum.NO_MORE_DOCS, vE_2_merged.nextDoc());
 
+    for (int i = r_1.maxDoc(); i < merged.maxDoc(); i++) {
+      switch (second) {
+      case BYTES_FIXED_DEREF:
+      case BYTES_FIXED_STRAIGHT:
+      case BYTES_VAR_DEREF:
+      case BYTES_VAR_STRAIGHT:
+      case BYTES_FIXED_SORTED:
+      case BYTES_VAR_SORTED:
+        assertEquals(source_2.getBytes(i - r_1.maxDoc(), new BytesRef()),
+            source_2_merged.getBytes(i, new BytesRef()));
+        break;
+      case FIXED_INTS_16:
+      case FIXED_INTS_32:
+      case FIXED_INTS_64:
+      case FIXED_INTS_8:
+      case VAR_INTS:
+        assertEquals(source_2.getInt(i - r_1.maxDoc()),
+            source_2_merged.getInt(i));
+        break;
+      case FLOAT_32:
+      case FLOAT_64:
+        assertEquals(source_2.getFloat(i - r_1.maxDoc()),
+            source_2_merged.getFloat(i), 0.0d);
+        break;
+      default:
+        fail("unkonwn " + first);
+      }
+    }
     // close resources
     r_1.close();
     r_2.close();
@@ -260,22 +287,12 @@ public class TestDocValuesIndexing exten
           assertEquals("index " + i, 0, value);
         }
 
-        ValuesEnum intsEnum = getValuesEnum(intsReader);
-        assertTrue(intsEnum.advance(base) >= base);
-
-        intsEnum = getValuesEnum(intsReader);
-        LongsRef enumRef = intsEnum.getInt();
-
         int expected = 0;
         for (int i = base; i < r.numDocs(); i++, expected++) {
           while (deleted.get(expected)) {
             expected++;
           }
-          assertEquals("advance failed at index: " + i + " of " + r.numDocs()
-              + " docs", i, intsEnum.advance(i));
           assertEquals(val + " mod: " + mod + " index: " +  i, expected%mod, ints.getInt(i));
-          assertEquals(expected%mod, enumRef.get());
-
         }
       }
         break;
@@ -289,20 +306,11 @@ public class TestDocValuesIndexing exten
           assertEquals(val + " failed for doc: " + i + " base: " + base,
               0.0d, value, 0.0d);
         }
-        ValuesEnum floatEnum = getValuesEnum(floatReader);
-        assertTrue(floatEnum.advance(base) >= base);
-
-        floatEnum = getValuesEnum(floatReader);
-        FloatsRef enumRef = floatEnum.getFloat();
         int expected = 0;
         for (int i = base; i < r.numDocs(); i++, expected++) {
           while (deleted.get(expected)) {
             expected++;
           }
-          assertEquals("advance failed at index: " + i + " of " + r.numDocs()
-              + " docs base:" + base, i, floatEnum.advance(i));
-          assertEquals(floatEnum.getClass() + " index " + i, 2.0 * expected,
-              enumRef.get(), 0.00001);
           assertEquals("index " + i, 2.0 * expected, floats.getFloat(i),
               0.00001);
         }
@@ -320,7 +328,7 @@ public class TestDocValuesIndexing exten
     w.close();
     d.close();
   }
-
+  
   public void runTestIndexBytes(IndexWriterConfig cfg, boolean withDeletions)
       throws CorruptIndexException, LockObtainFailedException, IOException {
     final Directory d = newDirectory();
@@ -353,6 +361,8 @@ public class TestDocValuesIndexing exten
         switch (byteIndexValue) {
         case BYTES_VAR_STRAIGHT:
         case BYTES_FIXED_STRAIGHT:
+        case BYTES_FIXED_DEREF:
+        case BYTES_FIXED_SORTED:
           // fixed straight returns bytesref with zero bytes all of fixed
           // length
           assertNotNull("expected none null - " + msg, br);
@@ -365,23 +375,13 @@ public class TestDocValuesIndexing exten
             }
           }
           break;
-        case BYTES_VAR_SORTED:
-        case BYTES_FIXED_SORTED:
-        case BYTES_VAR_DEREF:
-        case BYTES_FIXED_DEREF:
         default:
           assertNotNull("expected none null - " + msg, br);
-          assertEquals(0, br.length);
+          assertEquals(byteIndexValue + "", 0, br.length);
           // make sure we advance at least until base
-          ValuesEnum bytesEnum = getValuesEnum(bytesReader);
-          final int advancedTo = bytesEnum.advance(0);
-          assertTrue(byteIndexValue.name() + " advanced failed base:" + base
-              + " advancedTo: " + advancedTo, base <= advancedTo);
         }
       }
 
-      ValuesEnum bytesEnum = getValuesEnum(bytesReader);
-      final BytesRef enumRef = bytesEnum.bytes();
       // test the actual doc values added in this iteration
       assertEquals(base + numRemainingValues, r.numDocs());
       int v = 0;
@@ -393,17 +393,8 @@ public class TestDocValuesIndexing exten
           upto += bytesSize;
         }
         BytesRef br = bytes.getBytes(i, new BytesRef());
-        if (bytesEnum.docID() != i) {
-          assertEquals("seek failed for index " + i + " " + msg, i, bytesEnum
-              .advance(i));
-        }
         assertTrue(msg, br.length > 0);
         for (int j = 0; j < br.length; j++, upto++) {
-          assertTrue(" enumRef not initialized " + msg,
-              enumRef.bytes.length > 0);
-          assertEquals(
-              "EnumRef Byte at index " + j + " doesn't match - " + msg, upto,
-              enumRef.bytes[enumRef.offset + j]);
           if (!(br.bytes.length > br.offset + j))
             br = bytes.getBytes(i, new BytesRef());
           assertTrue("BytesRef index exceeded [" + msg + "] offset: "
@@ -446,33 +437,23 @@ public class TestDocValuesIndexing exten
   }
 
   private Source getSource(IndexDocValues values) throws IOException {
-    Source source;
-    if (random.nextInt(10) == 0) {
-      source = values.load();
-    } else {
-      // getSource uses cache internally
-      source = values.getSource();
+    // getSource uses cache internally
+    switch(random.nextInt(5)) {
+    case 3:
+      return values.load();
+    case 2:
+      return values.getDirectSource();
+    case 1:
+      return values.getSource();
+    default:
+      return values.getSource();
     }
-    assertNotNull(source);
-    return source;
   }
 
-  private ValuesEnum getValuesEnum(IndexDocValues values) throws IOException {
-    ValuesEnum valuesEnum;
-    if (!(values instanceof MultiIndexDocValues) && random.nextInt(10) == 0) {
-      // TODO not supported by MultiDocValues yet!
-      valuesEnum = getSource(values).getEnum();
-    } else {
-      valuesEnum = values.getEnum();
-
-    }
-    assertNotNull(valuesEnum);
-    return valuesEnum;
-  }
 
   private static EnumSet<ValueType> BYTES = EnumSet.of(ValueType.BYTES_FIXED_DEREF,
-      ValueType.BYTES_FIXED_SORTED, ValueType.BYTES_FIXED_STRAIGHT, ValueType.BYTES_VAR_DEREF,
-      ValueType.BYTES_VAR_SORTED, ValueType.BYTES_VAR_STRAIGHT);
+      ValueType.BYTES_FIXED_STRAIGHT, ValueType.BYTES_VAR_DEREF,
+      ValueType.BYTES_VAR_STRAIGHT, ValueType.BYTES_FIXED_SORTED, ValueType.BYTES_VAR_SORTED);
 
   private static EnumSet<ValueType> NUMERICS = EnumSet.of(ValueType.VAR_INTS,
       ValueType.FIXED_INTS_16, ValueType.FIXED_INTS_32,

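Note: the rewritten assertions above read doc values through random-access Source objects (values.getSource(), values.getDirectSource(), or values.load()) instead of advancing a ValuesEnum. A minimal sketch of that access pattern follows, assuming an IndexDocValues instance obtained as in the test's getDocValues helper; the class and method names are illustrative only, not part of this commit.

    // Illustrative only; mirrors the Source accessors used by the updated test.
    import java.io.IOException;
    import org.apache.lucene.index.values.IndexDocValues;
    import org.apache.lucene.index.values.IndexDocValues.Source;
    import org.apache.lucene.util.BytesRef;

    final class SourceAccessSketch {
      // Only the getter matching the field's ValueType is valid, so one method per family.
      static long readInt(IndexDocValues values, int docId) throws IOException {
        return values.getSource().getInt(docId);                    // FIXED_INTS_8/16/32/64, VAR_INTS
      }
      static double readFloat(IndexDocValues values, int docId) throws IOException {
        return values.getSource().getFloat(docId);                  // FLOAT_32, FLOAT_64
      }
      static BytesRef readBytes(IndexDocValues values, int docId) throws IOException {
        return values.getSource().getBytes(docId, new BytesRef());  // BYTES_* variants
      }
    }

getSource() reuses a cached source, while getDirectSource() returns an uncached per-call source; the test's getSource helper above randomizes between them (and load()).
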
Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/search/JustCompileSearch.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/search/JustCompileSearch.java?rev=1181188&r1=1181187&r2=1181188&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/search/JustCompileSearch.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/search/JustCompileSearch.java Mon Oct 10 19:58:24 2011
@@ -25,6 +25,7 @@ import org.apache.lucene.search.similari
 import org.apache.lucene.search.similarities.Similarity.ExactDocScorer;
 import org.apache.lucene.search.similarities.Similarity.SloppyDocScorer;
 import org.apache.lucene.search.similarities.Similarity.Stats;
+import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.TermContext;
 import org.apache.lucene.index.FieldInvertState;
@@ -364,7 +365,8 @@ final class JustCompileSearch {
     }
 
     @Override
-    public Scorer scorer(AtomicReaderContext context, ScorerContext scorerContext)
+    public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
+        boolean topScorer, Bits acceptDocs)
         throws IOException {
       throw new UnsupportedOperationException(UNSUPPORTED_MSG);
     }

Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java?rev=1181188&r1=1181187&r2=1181188&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java Mon Oct 10 19:58:24 2011
@@ -28,7 +28,6 @@ import org.apache.lucene.index.SlowMulti
 import org.apache.lucene.index.FieldInvertState;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.search.Weight.ScorerContext;
 import org.apache.lucene.search.similarities.DefaultSimilarity;
 import org.apache.lucene.search.similarities.DefaultSimilarityProvider;
 import org.apache.lucene.search.similarities.Similarity;
@@ -178,7 +177,8 @@ public class TestDisjunctionMaxQuery ext
     QueryUtils.check(random, dq, s);
     assertTrue(s.getTopReaderContext().isAtomic);
     final Weight dw = s.createNormalizedWeight(dq);
-    final Scorer ds = dw.scorer((AtomicReaderContext)s.getTopReaderContext(), ScorerContext.def());
+    AtomicReaderContext context = (AtomicReaderContext)s.getTopReaderContext();
+    final Scorer ds = dw.scorer(context, true, false, context.reader.getLiveDocs());
     final boolean skipOk = ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS;
     if (skipOk) {
       fail("firsttime skipTo found a match? ... "
@@ -193,7 +193,8 @@ public class TestDisjunctionMaxQuery ext
     assertTrue(s.getTopReaderContext().isAtomic);
     QueryUtils.check(random, dq, s);
     final Weight dw = s.createNormalizedWeight(dq);
-    final Scorer ds = dw.scorer((AtomicReaderContext)s.getTopReaderContext(), ScorerContext.def());
+    AtomicReaderContext context = (AtomicReaderContext)s.getTopReaderContext();
+    final Scorer ds = dw.scorer(context, true, false, context.reader.getLiveDocs());
     assertTrue("firsttime skipTo found no match",
         ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
     assertEquals("found wrong docid", "d4", r.document(ds.docID()).get("id"));

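Note: as these tests now show, Weight.scorer takes the scoreDocsInOrder and topScorer flags plus an acceptDocs Bits directly, where it previously took a ScorerContext. A minimal sketch of the new call, assuming a single-segment IndexSearcher and a Query; all names are illustrative only.

    // Illustrative only; mirrors the scorer(...) calls in the tests above.
    import java.io.IOException;
    import org.apache.lucene.index.IndexReader.AtomicReaderContext;
    import org.apache.lucene.search.DocIdSetIterator;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.Scorer;
    import org.apache.lucene.search.Weight;

    final class ScorerCallSketch {
      // Assumes the searcher's top reader context is atomic (a single segment).
      static void scoreAll(IndexSearcher searcher, Query query) throws IOException {
        Weight weight = searcher.createNormalizedWeight(query);
        AtomicReaderContext context = (AtomicReaderContext) searcher.getTopReaderContext();
        Scorer scorer = weight.scorer(context,
            true,                           // scoreDocsInOrder
            false,                          // topScorer
            context.reader.getLiveDocs());  // acceptDocs, typically the live docs
        while (scorer != null && scorer.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
          System.out.println(scorer.docID() + " scored " + scorer.score());
        }
      }
    }
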
Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/search/TestTermScorer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/search/TestTermScorer.java?rev=1181188&r1=1181187&r2=1181188&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/search/TestTermScorer.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/search/TestTermScorer.java Mon Oct 10 19:58:24 2011
@@ -29,7 +29,6 @@ import org.apache.lucene.index.IndexRead
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.SlowMultiReaderWrapper;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.search.Weight.ScorerContext;
 import org.apache.lucene.search.similarities.DefaultSimilarityProvider;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
@@ -79,7 +78,8 @@ public class TestTermScorer extends Luce
     
     Weight weight = indexSearcher.createNormalizedWeight(termQuery);
     assertTrue(indexSearcher.getTopReaderContext().isAtomic);
-    Scorer ts = weight.scorer((AtomicReaderContext)indexSearcher.getTopReaderContext(), ScorerContext.def().scoreDocsInOrder(true).topScorer(true));
+    AtomicReaderContext context = (AtomicReaderContext)indexSearcher.getTopReaderContext();
+    Scorer ts = weight.scorer(context, true, true, context.reader.getLiveDocs());
     // we have 2 documents with the term all in them, one document for all the
     // other values
     final List<TestHit> docs = new ArrayList<TestHit>();
@@ -140,7 +140,8 @@ public class TestTermScorer extends Luce
     
     Weight weight = indexSearcher.createNormalizedWeight(termQuery);
     assertTrue(indexSearcher.getTopReaderContext().isAtomic);
-    Scorer ts = weight.scorer((AtomicReaderContext) indexSearcher.getTopReaderContext(), ScorerContext.def().scoreDocsInOrder(true).topScorer(true));
+    AtomicReaderContext context = (AtomicReaderContext) indexSearcher.getTopReaderContext();
+    Scorer ts = weight.scorer(context, true, true, context.reader.getLiveDocs());
     assertTrue("next did not return a doc",
         ts.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
     assertTrue("score is not correct", ts.score() == 1.6931472f);
@@ -158,8 +159,8 @@ public class TestTermScorer extends Luce
     
     Weight weight = indexSearcher.createNormalizedWeight(termQuery);
     assertTrue(indexSearcher.getTopReaderContext().isAtomic);
-
-    Scorer ts = weight.scorer((AtomicReaderContext) indexSearcher.getTopReaderContext(), ScorerContext.def().scoreDocsInOrder(true).topScorer(true));
+    AtomicReaderContext context = (AtomicReaderContext) indexSearcher.getTopReaderContext();
+    Scorer ts = weight.scorer(context, true, true, context.reader.getLiveDocs());
     assertTrue("Didn't skip", ts.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
     // The next doc should be doc 5
     assertTrue("doc should be number 5", ts.docID() == 5);

Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/search/spans/JustCompileSearchSpans.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/search/spans/JustCompileSearchSpans.java?rev=1181188&r1=1181187&r2=1181188&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/search/spans/JustCompileSearchSpans.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/search/spans/JustCompileSearchSpans.java Mon Oct 10 19:58:24 2011
@@ -23,6 +23,7 @@ import java.util.Collection;
 import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.util.Bits;
 
 /**
  * Holds all implementations of classes in the o.a.l.s.spans package as a
@@ -82,7 +83,7 @@ final class JustCompileSearchSpans {
     }
 
     @Override
-    public Spans getSpans(AtomicReaderContext context) throws IOException {
+    public Spans getSpans(AtomicReaderContext context, Bits acceptDocs) throws IOException {
       throw new UnsupportedOperationException(UNSUPPORTED_MSG);
     }
 

Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/search/spans/MultiSpansWrapper.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/search/spans/MultiSpansWrapper.java?rev=1181188&r1=1181187&r2=1181188&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/search/spans/MultiSpansWrapper.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/search/spans/MultiSpansWrapper.java Mon Oct 10 19:58:24 2011
@@ -49,7 +49,7 @@ public class MultiSpansWrapper extends S
   public static Spans wrap(ReaderContext topLevelReaderContext, SpanQuery query) throws IOException {
     AtomicReaderContext[] leaves = ReaderUtil.leaves(topLevelReaderContext);
     if(leaves.length == 1) {
-      return query.getSpans(leaves[0]);
+      return query.getSpans(leaves[0], leaves[0].reader.getLiveDocs());
     }
     return new MultiSpansWrapper(leaves, query);
   }
@@ -60,14 +60,14 @@ public class MultiSpansWrapper extends S
       return false;
     }
     if (current == null) {
-      current = query.getSpans(leaves[leafOrd]);
+      current = query.getSpans(leaves[leafOrd], leaves[leafOrd].reader.getLiveDocs());
     }
     while(true) {
       if (current.next()) {
         return true;
       }
       if (++leafOrd < leaves.length) {
-        current = query.getSpans(leaves[leafOrd]);
+        current = query.getSpans(leaves[leafOrd], leaves[leafOrd].reader.getLiveDocs());
       } else {
         current = null;
         break;
@@ -85,17 +85,17 @@ public class MultiSpansWrapper extends S
     int subIndex = ReaderUtil.subIndex(target, leaves);
     assert subIndex >= leafOrd;
     if (subIndex != leafOrd) {
-      current = query.getSpans(leaves[subIndex]);
+      current = query.getSpans(leaves[subIndex], leaves[subIndex].reader.getLiveDocs());
       leafOrd = subIndex;
     } else if (current == null) {
-      current = query.getSpans(leaves[leafOrd]);
+      current = query.getSpans(leaves[leafOrd], leaves[leafOrd].reader.getLiveDocs());
     }
     while (true) {
       if (current.skipTo(target - leaves[leafOrd].docBase)) {
         return true;
       }
       if (++leafOrd < leaves.length) {
-        current = query.getSpans(leaves[leafOrd]);
+        current = query.getSpans(leaves[leafOrd], leaves[leafOrd].reader.getLiveDocs());
       } else {
           current = null;
           break;

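Note: SpanQuery.getSpans now takes the accepted-docs Bits for the leaf as a second argument, as the wrapper above does for every leaf. A minimal per-leaf sketch, with the query and top-level context assumed; names are illustrative only.

    // Illustrative only; follows the per-leaf pattern used by MultiSpansWrapper above.
    import java.io.IOException;
    import org.apache.lucene.index.IndexReader.AtomicReaderContext;
    import org.apache.lucene.index.IndexReader.ReaderContext;
    import org.apache.lucene.search.spans.SpanQuery;
    import org.apache.lucene.search.spans.Spans;
    import org.apache.lucene.util.ReaderUtil;

    final class SpansCallSketch {
      static void printSpans(SpanQuery query, ReaderContext topLevelReaderContext) throws IOException {
        AtomicReaderContext[] leaves = ReaderUtil.leaves(topLevelReaderContext);
        for (AtomicReaderContext leaf : leaves) {
          Spans spans = query.getSpans(leaf, leaf.reader.getLiveDocs());
          while (spans.next()) {
            // doc() is leaf-relative; add leaf.docBase for a top-level doc id
            System.out.println((leaf.docBase + spans.doc()) + ": " + spans.start() + "-" + spans.end());
          }
        }
      }
    }
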
Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java?rev=1181188&r1=1181187&r2=1181188&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java Mon Oct 10 19:58:24 2011
@@ -30,7 +30,6 @@ import org.apache.lucene.search.Explanat
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.Weight.ScorerContext;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.ReaderUtil;
@@ -170,7 +169,7 @@ public class TestNearSpansOrdered extend
     Weight w = searcher.createNormalizedWeight(q);
     ReaderContext topReaderContext = searcher.getTopReaderContext();
     AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
-    Scorer s = w.scorer(leaves[0], ScorerContext.def());
+    Scorer s = w.scorer(leaves[0], true, false, leaves[0].reader.getLiveDocs());
     assertEquals(1, s.advance(1));
   }
   

Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/search/spans/TestSpans.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/search/spans/TestSpans.java?rev=1181188&r1=1181187&r2=1181188&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/search/spans/TestSpans.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/search/spans/TestSpans.java Mon Oct 10 19:58:24 2011
@@ -23,7 +23,6 @@ import org.apache.lucene.search.CheckHit
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.Weight.ScorerContext;
 import org.apache.lucene.search.similarities.DefaultSimilarity;
 import org.apache.lucene.search.similarities.DefaultSimilarityProvider;
 import org.apache.lucene.search.similarities.Similarity;
@@ -435,7 +434,7 @@ public class TestSpans extends LuceneTes
                                 slop,
                                 ordered);
   
-        spanScorer = searcher.createNormalizedWeight(snq).scorer(leaves[i], ScorerContext.def());
+        spanScorer = searcher.createNormalizedWeight(snq).scorer(leaves[i], true, false, leaves[i].reader.getLiveDocs());
       } finally {
         searcher.setSimilarityProvider(oldSim);
       }

Modified: lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/util/packed/TestPackedInts.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/util/packed/TestPackedInts.java?rev=1181188&r1=1181187&r2=1181188&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/util/packed/TestPackedInts.java (original)
+++ lucene/dev/branches/solrcloud/lucene/src/test/org/apache/lucene/util/packed/TestPackedInts.java Mon Oct 10 19:58:24 2011
@@ -115,12 +115,38 @@ public class TestPackedInts extends Luce
           assertEquals(fp, in.getFilePointer());
           in.close();
         }
+        
+        { // test reader iterator get
+          IndexInput in = d.openInput("out.bin", newIOContext(random));
+          PackedInts.RandomAccessReaderIterator intsEnum = PackedInts.getRandomAccessReaderIterator(in);
+          for (int i = 0; i < valueCount; i++) {
+            final String msg = "index=" + i + " ceil=" + ceil + " valueCount="
+                + valueCount + " nbits=" + nbits + " for "
+                + intsEnum.getClass().getSimpleName();
+            final int ord = random.nextInt(valueCount);
+            long seek = intsEnum.get(ord);
+            assertEquals(msg, seek, values[ord]);
+            if (random.nextBoolean() && ord < valueCount-1) {
+              if (random.nextBoolean()) {
+                assertEquals(msg, values[ord+1], intsEnum.advance(ord+1));
+              } else {
+                assertEquals(msg, values[ord+1], intsEnum.next());
+              }
+            }
+          }
+          if (intsEnum.ord() < valueCount - 1)
+            assertEquals(values[valueCount - 1], intsEnum
+                .advance(valueCount - 1));
+          assertEquals(valueCount - 1, intsEnum.ord());
+          assertEquals(fp, in.getFilePointer());
+          in.close();
+        }
         ceil *= 2;
         d.close();
       }
     }
   }
-
+  
   public void testControlledEquality() {
     final int VALUE_COUNT = 255;
     final int BITS_PER_VALUE = 8;

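Note: the new test block above exercises PackedInts.RandomAccessReaderIterator, which supports positional get() alongside next()/advance()/ord(). A minimal sketch, assuming a Directory holding a previously written packed stream "out.bin" with at least ten values; IOContext.DEFAULT stands in for the test's randomized newIOContext(random), and all names are illustrative only.

    // Illustrative only; mirrors the accessors used in the new test block above.
    import java.io.IOException;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.IOContext;
    import org.apache.lucene.store.IndexInput;
    import org.apache.lucene.util.packed.PackedInts;

    final class PackedIntsSketch {
      static void readSome(Directory dir) throws IOException {
        IndexInput in = dir.openInput("out.bin", IOContext.DEFAULT);
        PackedInts.RandomAccessReaderIterator it = PackedInts.getRandomAccessReaderIterator(in);
        long fifth = it.get(5);      // random access by ordinal
        long sixth = it.next();      // continues from the current ordinal
        long tenth = it.advance(9);  // jumps forward; it.ord() is now 9
        System.out.println(fifth + " " + sixth + " " + tenth + " at ord " + it.ord());
        in.close();
      }
    }
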
Modified: lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellDictionary.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellDictionary.java?rev=1181188&r1=1181187&r2=1181188&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellDictionary.java (original)
+++ lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellDictionary.java Mon Oct 10 19:58:24 2011
@@ -27,6 +27,7 @@ import java.text.ParseException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Locale;
 
 public class HunspellDictionary {
 
@@ -43,11 +44,15 @@ public class HunspellDictionary {
   private static final String PREFIX_CONDITION_REGEX_PATTERN = "%s.*";
   private static final String SUFFIX_CONDITION_REGEX_PATTERN = ".*%s";
 
+  private static final boolean IGNORE_CASE_DEFAULT = false;
+
   private CharArrayMap<List<HunspellWord>> words;
   private CharArrayMap<List<HunspellAffix>> prefixes;
   private CharArrayMap<List<HunspellAffix>> suffixes;
 
   private FlagParsingStrategy flagParsingStrategy = new SimpleFlagParsingStrategy(); // Default flag parsing strategy
+  private boolean ignoreCase = IGNORE_CASE_DEFAULT;
+
   private final Version version;
 
   /**
@@ -61,7 +66,22 @@ public class HunspellDictionary {
    * @throws ParseException Can be thrown if the content of the files does not meet expected formats
    */
   public HunspellDictionary(InputStream affix, InputStream dictionary, Version version) throws IOException, ParseException {
-    this(affix, Arrays.asList(dictionary), version);
+    this(affix, Arrays.asList(dictionary), version, IGNORE_CASE_DEFAULT);
+  }
+
+  /**
+   * Creates a new HunspellDictionary containing the information read from the provided InputStreams to hunspell affix
+   * and dictionary files
+   *
+   * @param affix InputStream for reading the hunspell affix file
+   * @param dictionary InputStream for reading the hunspell dictionary file
+   * @param version Lucene Version
+   * @param ignoreCase If true, dictionary matching will be case insensitive
+   * @throws IOException Can be thrown while reading from the InputStreams
+   * @throws ParseException Can be thrown if the content of the files does not meet expected formats
+   */
+  public HunspellDictionary(InputStream affix, InputStream dictionary, Version version, boolean ignoreCase) throws IOException, ParseException {
+    this(affix, Arrays.asList(dictionary), version, ignoreCase);
   }
 
   /**
@@ -71,15 +91,17 @@ public class HunspellDictionary {
    * @param affix InputStream for reading the hunspell affix file
    * @param dictionaries InputStreams for reading the hunspell dictionary file
    * @param version Lucene Version
+   * @param ignoreCase If true, dictionary matching will be case insensitive
    * @throws IOException Can be thrown while reading from the InputStreams
    * @throws ParseException Can be thrown if the content of the files does not meet expected formats
    */
-  public HunspellDictionary(InputStream affix, List<InputStream> dictionaries, Version version) throws IOException, ParseException {
+  public HunspellDictionary(InputStream affix, List<InputStream> dictionaries, Version version, boolean ignoreCase) throws IOException, ParseException {
     this.version = version;
+    this.ignoreCase = ignoreCase;
     String encoding = getDictionaryEncoding(affix);
     CharsetDecoder decoder = getJavaEncoding(encoding);
     readAffixFile(affix, decoder);
-    words = new CharArrayMap<List<HunspellWord>>(version, 65535 /* guess */, false);
+    words = new CharArrayMap<List<HunspellWord>>(version, 65535 /* guess */, this.ignoreCase);
     for (InputStream dictionary : dictionaries) {
       readDictionaryFile(dictionary, decoder);
     }
@@ -129,8 +151,8 @@ public class HunspellDictionary {
    * @throws IOException Can be thrown while reading from the InputStream
    */
   private void readAffixFile(InputStream affixStream, CharsetDecoder decoder) throws IOException {
-    prefixes = new CharArrayMap<List<HunspellAffix>>(version, 8, false);
-    suffixes = new CharArrayMap<List<HunspellAffix>>(version, 8, false);
+    prefixes = new CharArrayMap<List<HunspellAffix>>(version, 8, ignoreCase);
+    suffixes = new CharArrayMap<List<HunspellAffix>>(version, 8, ignoreCase);
     
     BufferedReader reader = new BufferedReader(new InputStreamReader(affixStream, decoder));
     String line = null;
@@ -308,6 +330,9 @@ public class HunspellDictionary {
         wordForm = new HunspellWord(flagParsingStrategy.parseFlags(line.substring(flagSep + 1, end)));
         Arrays.sort(wordForm.getFlags());
         entry = line.substring(0, flagSep);
+        if(ignoreCase) {
+          entry = entry.toLowerCase(Locale.ENGLISH);
+        }
       }
       
       List<HunspellWord> entries = words.get(entry);
@@ -408,4 +433,8 @@ public class HunspellDictionary {
       return flags;
     }
   }
+
+  public boolean isIgnoreCase() {
+    return ignoreCase;
+  }
 }

Modified: lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellStemmer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellStemmer.java?rev=1181188&r1=1181187&r2=1181188&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellStemmer.java (original)
+++ lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellStemmer.java Mon Oct 10 19:58:24 2011
@@ -21,9 +21,14 @@ import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.text.ParseException;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Scanner;
 
 import org.apache.lucene.analysis.util.CharArraySet;
+import org.apache.lucene.analysis.util.CharacterUtils;
 import org.apache.lucene.util.Version;
 
 /**
@@ -36,6 +41,7 @@ public class HunspellStemmer {
   
   private final HunspellDictionary dictionary;
   private final StringBuilder segment = new StringBuilder();
+  private CharacterUtils charUtils = CharacterUtils.getInstance(Version.LUCENE_40);
 
   /**
    * Constructs a new HunspellStemmer which will use the provided HunspellDictionary to create its stems
@@ -79,7 +85,7 @@ public class HunspellStemmer {
    */
   public List<Stem> uniqueStems(char word[], int length) {
     List<Stem> stems = new ArrayList<Stem>();
-    CharArraySet terms = new CharArraySet(dictionary.getVersion(), 8, false);
+    CharArraySet terms = new CharArraySet(dictionary.getVersion(), 8, dictionary.isIgnoreCase());
     if (dictionary.lookupWord(word, 0, length) != null) {
       stems.add(new Stem(word, length));
       terms.add(word);
@@ -167,6 +173,12 @@ public class HunspellStemmer {
    */
   @SuppressWarnings("unchecked")
   public List<Stem> applyAffix(char strippedWord[], int length, HunspellAffix affix, int recursionDepth) {
+    if(dictionary.isIgnoreCase()) {
+      for(int i=0;i<strippedWord.length;){
+        i += Character.toChars(
+              Character.toLowerCase(charUtils.codePointAt(strippedWord, i)), strippedWord, i);
+      }
+    }
     segment.setLength(0);
     segment.append(strippedWord, 0, length);
     if (!affix.checkCondition(segment)) {
@@ -174,7 +186,7 @@ public class HunspellStemmer {
     }
 
     List<Stem> stems = new ArrayList<Stem>();
-    
+
     List<HunspellWord> words = dictionary.lookupWord(strippedWord, 0, length);
     if (words != null) {
       for (HunspellWord hunspellWord : words) {
@@ -294,15 +306,24 @@ public class HunspellStemmer {
    * @throws ParseException Can be thrown while parsing the files
    */
   public static void main(String[] args) throws IOException, ParseException {
-    if (args.length != 2) {
-      System.out.println("usage: HunspellStemmer <affix location> <dic location>");
+    boolean ignoreCase = false;
+    int offset = 0;
+    
+    if (args.length < 2) {
+      System.out.println("usage: HunspellStemmer [-i] <affix location> <dic location>");
       System.exit(1);
     }
 
-    InputStream affixInputStream = new FileInputStream(args[0]);
-    InputStream dicInputStream = new FileInputStream(args[1]);
+    if(args[offset].equals("-i")) {
+      ignoreCase = true;
+      System.out.println("Ignoring case. All stems will be returned lowercased");
+      offset++;
+    }
+    
+    InputStream affixInputStream = new FileInputStream(args[offset++]);
+    InputStream dicInputStream = new FileInputStream(args[offset++]);
 
-    HunspellDictionary dictionary = new HunspellDictionary(affixInputStream, dicInputStream, Version.LUCENE_40);
+    HunspellDictionary dictionary = new HunspellDictionary(affixInputStream, dicInputStream, Version.LUCENE_40, ignoreCase);
 
     affixInputStream.close();
     dicInputStream.close();

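Note: both Hunspell classes gain case-insensitive matching here: HunspellDictionary takes an ignoreCase flag and the command-line stemmer accepts -i. A minimal usage sketch of the new four-argument constructor; the file names (en_US.aff / en_US.dic) and class name are placeholders, not part of this commit.

    // Illustrative only; uses the new ignoreCase constructor added above.
    import java.io.FileInputStream;
    import java.io.InputStream;
    import org.apache.lucene.analysis.hunspell.HunspellDictionary;
    import org.apache.lucene.analysis.hunspell.HunspellStemmer;
    import org.apache.lucene.util.Version;

    final class HunspellIgnoreCaseSketch {
      public static void main(String[] args) throws Exception {
        InputStream affix = new FileInputStream("en_US.aff");
        InputStream dic = new FileInputStream("en_US.dic");
        HunspellDictionary dictionary =
            new HunspellDictionary(affix, dic, Version.LUCENE_40, true /* ignoreCase */);
        affix.close();
        dic.close();
        HunspellStemmer stemmer = new HunspellStemmer(dictionary);
        char[] word = "Books".toCharArray();
        System.out.println(stemmer.uniqueStems(word, word.length).size()
            + " unique stem(s) for " + new String(word));
      }
    }

The equivalent command-line invocation with the new flag is: java org.apache.lucene.analysis.hunspell.HunspellStemmer -i en_US.aff en_US.dic
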
Modified: lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/ClassicTokenizerImpl.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/ClassicTokenizerImpl.java?rev=1181188&r1=1181187&r2=1181188&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/ClassicTokenizerImpl.java (original)
+++ lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/ClassicTokenizerImpl.java Mon Oct 10 19:58:24 2011
@@ -1,4 +1,4 @@
-/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 2/9/11 11:45 AM */
+/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 9/30/11 12:10 PM */
 
 package org.apache.lucene.analysis.standard;
 
@@ -33,8 +33,8 @@ import org.apache.lucene.analysis.tokena
 /**
  * This class is a scanner generated by 
  * <a href="http://www.jflex.de/">JFlex</a> 1.5.0-SNAPSHOT
- * on 2/9/11 11:45 AM from the specification file
- * <tt>C:/Users/rmuir/workspace/lucene-2911/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/ClassicTokenizerImpl.jflex</tt>
+ * on 9/30/11 12:10 PM from the specification file
+ * <tt>/lucene/jflex/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/ClassicTokenizerImpl.jflex</tt>
  */
 class ClassicTokenizerImpl implements StandardTokenizerInterface {
 
@@ -694,17 +694,17 @@ public final void getText(CharTermAttrib
           { return HOST;
           }
         case 13: break;
-        case 1: 
-          { /* ignore */
-          }
-        case 14: break;
         case 8: 
           { return ACRONYM_DEP;
           }
-        case 15: break;
+        case 14: break;
         case 5: 
           { return NUM;
           }
+        case 15: break;
+        case 1: 
+          { /* Break so we don't hit fall-through warning: */ break;/* ignore */
+          }
         case 16: break;
         case 9: 
           { return ACRONYM;

Modified: lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/ClassicTokenizerImpl.jflex
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/ClassicTokenizerImpl.jflex?rev=1181188&r1=1181187&r2=1181188&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/ClassicTokenizerImpl.jflex (original)
+++ lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/ClassicTokenizerImpl.jflex Mon Oct 10 19:58:24 2011
@@ -127,4 +127,4 @@ WHITESPACE = \r\n | [ \r\n\t\f]
 {ACRONYM_DEP}                                                  { return ACRONYM_DEP; }
 
 /** Ignore the rest */
-. | {WHITESPACE}                                               { /* ignore */ }
+. | {WHITESPACE}                                               { /* Break so we don't hit fall-through warning: */ break;/* ignore */ }

Modified: lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/SUPPLEMENTARY.jflex-macro
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/SUPPLEMENTARY.jflex-macro?rev=1181188&r1=1181187&r2=1181188&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/SUPPLEMENTARY.jflex-macro (original)
+++ lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/SUPPLEMENTARY.jflex-macro Mon Oct 10 19:58:24 2011
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-// Generated using ICU4J 4.6.0.0 on Wednesday, February 9, 2011 4:45:11 PM UTC
+// Generated using ICU4J 4.8.0.0 on Friday, September 30, 2011 4:10:42 PM UTC
 // by org.apache.lucene.analysis.icu.GenerateJFlexSupplementaryMacros
 
 

Modified: lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java?rev=1181188&r1=1181187&r2=1181188&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java (original)
+++ lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java Mon Oct 10 19:58:24 2011
@@ -1,4 +1,4 @@
-/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 8/4/11 4:07 PM */
+/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 9/30/11 12:10 PM */
 
 package org.apache.lucene.analysis.standard;
 
@@ -1074,21 +1074,21 @@ public final class StandardTokenizerImpl
           { return IDEOGRAPHIC_TYPE;
           }
         case 12: break;
-        case 1: 
-          { /* Not numeric, word, ideographic, hiragana, or SE Asian -- ignore it. */
-          }
-        case 13: break;
         case 8: 
           { return HANGUL_TYPE;
           }
-        case 14: break;
+        case 13: break;
         case 3: 
           { return NUMERIC_TYPE;
           }
-        case 15: break;
+        case 14: break;
         case 7: 
           { return HIRAGANA_TYPE;
           }
+        case 15: break;
+        case 1: 
+          { /* Break so we don't hit fall-through warning: */ break; /* Not numeric, word, ideographic, hiragana, or SE Asian -- ignore it. */
+          }
         case 16: break;
         default: 
           if (zzInput == YYEOF && zzStartRead == zzCurrentPos) {

Modified: lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex?rev=1181188&r1=1181187&r2=1181188&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex (original)
+++ lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex Mon Oct 10 19:58:24 2011
@@ -189,4 +189,4 @@ HiraganaEx = {Hiragana} ({Format} | {Ext
 //        WB3b.  ÷ (Newline | CR | LF)
 //        WB14.  Any ÷ Any
 //
-[^] { /* Not numeric, word, ideographic, hiragana, or SE Asian -- ignore it. */ }
+[^] { /* Break so we don't hit fall-through warning: */ break; /* Not numeric, word, ideographic, hiragana, or SE Asian -- ignore it. */ }

Modified: lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizerImpl.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizerImpl.java?rev=1181188&r1=1181187&r2=1181188&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizerImpl.java (original)
+++ lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizerImpl.java Mon Oct 10 19:58:24 2011
@@ -1,4 +1,4 @@
-/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 8/4/11 7:48 PM */
+/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 9/30/11 12:10 PM */
 
 package org.apache.lucene.analysis.standard;
 
@@ -3712,25 +3712,25 @@ public final class UAX29URLEmailTokenize
           { return SOUTH_EAST_ASIAN_TYPE;
           }
         case 12: break;
+        case 1: 
+          { /* Break so we don't hit fall-through warning: */ break;/* Not numeric, word, ideographic, hiragana, or SE Asian -- ignore it. */
+          }
+        case 13: break;
         case 10: 
           { return URL_TYPE;
           }
-        case 13: break;
+        case 14: break;
         case 9: 
           { return EMAIL_TYPE;
           }
-        case 14: break;
+        case 15: break;
         case 4: 
           { return KATAKANA_TYPE;
           }
-        case 15: break;
+        case 16: break;
         case 6: 
           { return IDEOGRAPHIC_TYPE;
           }
-        case 16: break;
-        case 1: 
-          { /* Not numeric, word, ideographic, hiragana, or SE Asian -- ignore it. */
-          }
         case 17: break;
         case 8: 
           { return HANGUL_TYPE;

Modified: lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizerImpl.jflex
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizerImpl.jflex?rev=1181188&r1=1181187&r2=1181188&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizerImpl.jflex (original)
+++ lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizerImpl.jflex Mon Oct 10 19:58:24 2011
@@ -277,4 +277,4 @@ EMAIL = {EMAILlocalPart} "@" ({DomainNam
 //        WB3b.  ÷ (Newline | CR | LF)
 //        WB14.  Any ÷ Any
 //
-[^] { /* Not numeric, word, ideographic, hiragana, or SE Asian -- ignore it. */ }
+[^] { /* Break so we don't hit fall-through warning: */ break;/* Not numeric, word, ideographic, hiragana, or SE Asian -- ignore it. */ }

Modified: lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/std31/StandardTokenizerImpl31.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/std31/StandardTokenizerImpl31.java?rev=1181188&r1=1181187&r2=1181188&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/std31/StandardTokenizerImpl31.java (original)
+++ lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/std31/StandardTokenizerImpl31.java Mon Oct 10 19:58:24 2011
@@ -1,4 +1,4 @@
-/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 8/4/11 4:07 PM */
+/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 9/30/11 12:10 PM */
 
 package org.apache.lucene.analysis.standard.std31;
 
@@ -34,8 +34,8 @@ import org.apache.lucene.analysis.tokena
 /**
  * This class is a scanner generated by 
  * <a href="http://www.jflex.de/">JFlex</a> 1.5.0-SNAPSHOT
- * on 8/4/11 4:07 PM from the specification file
- * <tt>/home/rmuir/workspace/lucene-clean-trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/std31/StandardTokenizerImpl31.jflex</tt>
+ * on 9/30/11 12:10 PM from the specification file
+ * <tt>/lucene/jflex/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/std31/StandardTokenizerImpl31.jflex</tt>
  */
 public final class StandardTokenizerImpl31 implements StandardTokenizerInterface {
 
@@ -1055,21 +1055,21 @@ public final class StandardTokenizerImpl
           { return IDEOGRAPHIC_TYPE;
           }
         case 12: break;
-        case 1: 
-          { /* Not numeric, word, ideographic, hiragana, or SE Asian -- ignore it. */
-          }
-        case 13: break;
         case 8: 
           { return HANGUL_TYPE;
           }
-        case 14: break;
+        case 13: break;
         case 3: 
           { return NUMERIC_TYPE;
           }
-        case 15: break;
+        case 14: break;
         case 7: 
           { return HIRAGANA_TYPE;
           }
+        case 15: break;
+        case 1: 
+          { /* Break so we don't hit fall-through warning: */ break; /* Not numeric, word, ideographic, hiragana, or SE Asian -- ignore it. */
+          }
         case 16: break;
         default: 
           if (zzInput == YYEOF && zzStartRead == zzCurrentPos) {

Modified: lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/std31/StandardTokenizerImpl31.jflex
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/std31/StandardTokenizerImpl31.jflex?rev=1181188&r1=1181187&r2=1181188&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/std31/StandardTokenizerImpl31.jflex (original)
+++ lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/std31/StandardTokenizerImpl31.jflex Mon Oct 10 19:58:24 2011
@@ -181,4 +181,4 @@ ExtendNumLetEx = {ExtendNumLet}         
 //        WB3b.  ÷ (Newline | CR | LF)
 //        WB14.  Any ÷ Any
 //
-[^] { /* Not numeric, word, ideographic, hiragana, or SE Asian -- ignore it. */ }
+[^] { /* Break so we don't hit fall-through warning: */ break; /* Not numeric, word, ideographic, hiragana, or SE Asian -- ignore it. */ }

Modified: lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/std31/UAX29URLEmailTokenizerImpl31.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/std31/UAX29URLEmailTokenizerImpl31.java?rev=1181188&r1=1181187&r2=1181188&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/std31/UAX29URLEmailTokenizerImpl31.java (original)
+++ lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/std31/UAX29URLEmailTokenizerImpl31.java Mon Oct 10 19:58:24 2011
@@ -1,4 +1,4 @@
-/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 8/4/11 7:33 PM */
+/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 9/30/11 12:10 PM */
 
 package org.apache.lucene.analysis.standard.std31;
 
@@ -34,8 +34,8 @@ import org.apache.lucene.analysis.tokena
 /**
  * This class is a scanner generated by 
  * <a href="http://www.jflex.de/">JFlex</a> 1.5.0-SNAPSHOT
- * on 8/4/11 7:33 PM from the specification file
- * <tt>/home/rmuir/workspace/lucene-clean-trunk/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/std31/UAX29URLEmailTokenizerImpl31.jflex</tt>
+ * on 9/30/11 12:10 PM from the specification file
+ * <tt>/lucene/jflex/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/std31/UAX29URLEmailTokenizerImpl31.jflex</tt>
  */
 public final class UAX29URLEmailTokenizerImpl31 implements StandardTokenizerInterface {
 
@@ -3622,25 +3622,25 @@ public final class UAX29URLEmailTokenize
           { return SOUTH_EAST_ASIAN_TYPE;
           }
         case 12: break;
+        case 1: 
+          { /* Break so we don't hit fall-through warning: */ break;/* Not numeric, word, ideographic, hiragana, or SE Asian -- ignore it. */
+          }
+        case 13: break;
         case 10: 
           { return URL_TYPE;
           }
-        case 13: break;
+        case 14: break;
         case 9: 
           { return EMAIL_TYPE;
           }
-        case 14: break;
+        case 15: break;
         case 4: 
           { return KATAKANA_TYPE;
           }
-        case 15: break;
+        case 16: break;
         case 6: 
           { return IDEOGRAPHIC_TYPE;
           }
-        case 16: break;
-        case 1: 
-          { /* Not numeric, word, ideographic, hiragana, or SE Asian -- ignore it. */
-          }
         case 17: break;
         case 8: 
           { return HANGUL_TYPE;

Modified: lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/std31/UAX29URLEmailTokenizerImpl31.jflex
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/std31/UAX29URLEmailTokenizerImpl31.jflex?rev=1181188&r1=1181187&r2=1181188&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/std31/UAX29URLEmailTokenizerImpl31.jflex (original)
+++ lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/std31/UAX29URLEmailTokenizerImpl31.jflex Mon Oct 10 19:58:24 2011
@@ -266,4 +266,4 @@ EMAIL = {EMAILlocalPart} "@" ({DomainNam
 //        WB3b.  ÷ (Newline | CR | LF)
 //        WB14.  Any ÷ Any
 //
-[^] { /* Not numeric, word, ideographic, hiragana, or SE Asian -- ignore it. */ }
+[^] { /* Break so we don't hit fall-through warning: */ break;/* Not numeric, word, ideographic, hiragana, or SE Asian -- ignore it. */ }

Modified: lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java?rev=1181188&r1=1181187&r2=1181188&view=diff
==============================================================================
--- lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java (original)
+++ lucene/dev/branches/solrcloud/modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java Mon Oct 10 19:58:24 2011
@@ -1,4 +1,4 @@
-/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 2/9/11 11:45 AM */
+/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 9/30/11 12:11 PM */
 
 package org.apache.lucene.analysis.wikipedia;
 
@@ -25,8 +25,8 @@ import org.apache.lucene.analysis.tokena
 /**
  * This class is a scanner generated by 
  * <a href="http://www.jflex.de/">JFlex</a> 1.5.0-SNAPSHOT
- * on 2/9/11 11:45 AM from the specification file
- * <tt>C:/Users/rmuir/workspace/lucene-2911/modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.jflex</tt>
+ * on 9/30/11 12:11 PM from the specification file
+ * <tt>/lucene/jflex/modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.jflex</tt>
  */
 class WikipediaTokenizerImpl {
 
@@ -92,15 +92,16 @@ class WikipediaTokenizerImpl {
     "\1\13\1\14\1\10\1\15\1\16\1\15\1\17\1\20"+
     "\1\10\1\21\1\10\4\22\1\23\1\22\1\24\1\25"+
     "\1\26\3\0\1\27\14\0\1\30\1\31\1\32\1\33"+
-    "\1\11\1\0\1\34\1\35\1\0\1\36\1\0\1\37"+
-    "\3\0\1\40\1\41\2\42\1\41\2\43\2\0\1\42"+
-    "\1\0\14\42\1\41\3\0\1\11\1\44\3\0\1\45"+
-    "\1\46\5\0\1\47\4\0\1\47\2\0\2\47\2\0"+
-    "\1\11\5\0\1\31\1\41\1\42\1\50\3\0\1\11"+
-    "\2\0\1\51\30\0\1\52\2\0\1\53\1\54\1\55";
+    "\1\11\1\0\1\34\1\35\1\36\1\0\1\37\1\0"+
+    "\1\40\3\0\1\41\1\42\2\43\1\42\2\44\2\0"+
+    "\1\43\1\0\14\43\1\42\3\0\1\11\1\45\3\0"+
+    "\1\46\1\47\5\0\1\50\4\0\1\50\2\0\2\50"+
+    "\2\0\1\11\5\0\1\31\1\42\1\43\1\51\3\0"+
+    "\1\11\2\0\1\52\30\0\1\53\2\0\1\54\1\55"+
+    "\1\56";
 
   private static int [] zzUnpackAction() {
-    int [] result = new int[183];
+    int [] result = new int[184];
     int offset = 0;
     offset = zzUnpackAction(ZZ_ACTION_PACKED_0, offset, result);
     return result;
@@ -134,23 +135,23 @@ class WikipediaTokenizerImpl {
     "\0\u070c\0\u0738\0\u0764\0\u0790\0\u01b8\0\u01b8\0\u07bc\0\u07e8"+
     "\0\u0814\0\u01b8\0\u0840\0\u086c\0\u0898\0\u08c4\0\u08f0\0\u091c"+
     "\0\u0948\0\u0974\0\u09a0\0\u09cc\0\u09f8\0\u0a24\0\u0a50\0\u0a7c"+
-    "\0\u01b8\0\u01b8\0\u0aa8\0\u0ad4\0\u0b00\0\u0b00\0\u0b2c\0\u0b58"+
-    "\0\u0b84\0\u0bb0\0\u0bdc\0\u0c08\0\u0c34\0\u0c60\0\u0c8c\0\u0cb8"+
-    "\0\u0ce4\0\u0d10\0\u0898\0\u0d3c\0\u0d68\0\u0d94\0\u0dc0\0\u0dec"+
-    "\0\u0e18\0\u0e44\0\u0e70\0\u0e9c\0\u0ec8\0\u0ef4\0\u0f20\0\u0f4c"+
-    "\0\u0f78\0\u0fa4\0\u0fd0\0\u0ffc\0\u1028\0\u1054\0\u1080\0\u10ac"+
-    "\0\u10d8\0\u01b8\0\u1104\0\u1130\0\u115c\0\u1188\0\u01b8\0\u11b4"+
-    "\0\u11e0\0\u120c\0\u1238\0\u1264\0\u1290\0\u12bc\0\u12e8\0\u1314"+
-    "\0\u1340\0\u136c\0\u1398\0\u13c4\0\u086c\0\u09f8\0\u13f0\0\u141c"+
-    "\0\u1448\0\u1474\0\u14a0\0\u14cc\0\u14f8\0\u1524\0\u01b8\0\u1550"+
-    "\0\u157c\0\u15a8\0\u15d4\0\u1600\0\u162c\0\u1658\0\u1684\0\u16b0"+
-    "\0\u01b8\0\u16dc\0\u1708\0\u1734\0\u1760\0\u178c\0\u17b8\0\u17e4"+
-    "\0\u1810\0\u183c\0\u1868\0\u1894\0\u18c0\0\u18ec\0\u1918\0\u1944"+
-    "\0\u1970\0\u199c\0\u19c8\0\u19f4\0\u1a20\0\u1a4c\0\u1a78\0\u1aa4"+
-    "\0\u1ad0\0\u1afc\0\u1b28\0\u1b54\0\u01b8\0\u01b8\0\u01b8";
+    "\0\u01b8\0\u01b8\0\u0aa8\0\u0ad4\0\u0b00\0\u0b00\0\u01b8\0\u0b2c"+
+    "\0\u0b58\0\u0b84\0\u0bb0\0\u0bdc\0\u0c08\0\u0c34\0\u0c60\0\u0c8c"+
+    "\0\u0cb8\0\u0ce4\0\u0d10\0\u0898\0\u0d3c\0\u0d68\0\u0d94\0\u0dc0"+
+    "\0\u0dec\0\u0e18\0\u0e44\0\u0e70\0\u0e9c\0\u0ec8\0\u0ef4\0\u0f20"+
+    "\0\u0f4c\0\u0f78\0\u0fa4\0\u0fd0\0\u0ffc\0\u1028\0\u1054\0\u1080"+
+    "\0\u10ac\0\u10d8\0\u01b8\0\u1104\0\u1130\0\u115c\0\u1188\0\u01b8"+
+    "\0\u11b4\0\u11e0\0\u120c\0\u1238\0\u1264\0\u1290\0\u12bc\0\u12e8"+
+    "\0\u1314\0\u1340\0\u136c\0\u1398\0\u13c4\0\u086c\0\u09f8\0\u13f0"+
+    "\0\u141c\0\u1448\0\u1474\0\u14a0\0\u14cc\0\u14f8\0\u1524\0\u01b8"+
+    "\0\u1550\0\u157c\0\u15a8\0\u15d4\0\u1600\0\u162c\0\u1658\0\u1684"+
+    "\0\u16b0\0\u01b8\0\u16dc\0\u1708\0\u1734\0\u1760\0\u178c\0\u17b8"+
+    "\0\u17e4\0\u1810\0\u183c\0\u1868\0\u1894\0\u18c0\0\u18ec\0\u1918"+
+    "\0\u1944\0\u1970\0\u199c\0\u19c8\0\u19f4\0\u1a20\0\u1a4c\0\u1a78"+
+    "\0\u1aa4\0\u1ad0\0\u1afc\0\u1b28\0\u1b54\0\u01b8\0\u01b8\0\u01b8";
 
   private static int [] zzUnpackRowMap() {
-    int [] result = new int[183];
+    int [] result = new int[184];
     int offset = 0;
     offset = zzUnpackRowMap(ZZ_ROWMAP_PACKED_0, offset, result);
     return result;
@@ -208,115 +209,115 @@ class WikipediaTokenizerImpl {
     "\3\0\3\40\16\0\4\40\7\0\2\40\1\113\12\40"+
     "\3\0\3\40\2\0\1\114\67\0\4\44\7\0\15\44"+
     "\3\0\3\44\24\0\1\36\55\0\1\115\43\0\4\47"+
-    "\7\0\15\47\3\0\3\47\26\0\1\116\37\0\1\111"+
-    "\57\0\4\52\7\0\15\52\3\0\3\52\11\0\1\117"+
+    "\7\0\15\47\3\0\3\47\26\0\1\116\37\0\1\117"+
+    "\57\0\4\52\7\0\15\52\3\0\3\52\11\0\1\120"+
     "\4\0\4\70\7\0\15\70\3\0\3\70\16\0\4\54"+
-    "\7\0\15\54\3\0\3\54\47\0\1\111\6\0\1\120"+
-    "\63\0\1\121\57\0\4\62\7\0\15\62\3\0\3\62"+
-    "\24\0\1\56\55\0\1\122\43\0\4\70\7\0\15\70"+
-    "\3\0\3\70\14\0\1\36\1\0\4\123\1\0\3\124"+
-    "\3\0\15\123\3\0\3\123\14\0\1\36\1\0\4\123"+
-    "\1\0\3\124\3\0\3\123\1\125\11\123\3\0\3\123"+
-    "\16\0\1\126\1\0\1\126\10\0\15\126\3\0\3\126"+
-    "\16\0\1\127\1\130\1\131\1\132\7\0\15\127\3\0"+
-    "\3\127\16\0\1\133\1\0\1\133\10\0\15\133\3\0"+
-    "\3\133\16\0\1\134\1\135\1\134\1\135\7\0\15\134"+
-    "\3\0\3\134\16\0\1\136\2\137\1\140\7\0\15\136"+
-    "\3\0\3\136\16\0\1\100\2\141\10\0\15\100\3\0"+
-    "\3\100\16\0\1\142\2\143\1\144\7\0\15\142\3\0"+
-    "\3\142\16\0\4\135\7\0\15\135\3\0\3\135\16\0"+
-    "\1\145\2\146\1\147\7\0\15\145\3\0\3\145\16\0"+
-    "\1\150\2\151\1\152\7\0\15\150\3\0\3\150\16\0"+
-    "\1\153\1\143\1\154\1\144\7\0\15\153\3\0\3\153"+
-    "\16\0\1\155\2\130\1\132\7\0\15\155\3\0\3\155"+
-    "\30\0\1\156\1\157\64\0\1\160\27\0\4\40\7\0"+
-    "\2\40\1\161\12\40\3\0\3\40\2\0\1\162\101\0"+
-    "\1\163\1\164\40\0\4\70\7\0\6\70\1\165\6\70"+
-    "\3\0\3\70\2\0\1\166\63\0\1\167\71\0\1\170"+
-    "\1\171\34\0\1\172\1\0\1\36\1\0\4\123\1\0"+
-    "\3\124\3\0\15\123\3\0\3\123\16\0\4\173\1\0"+
-    "\3\124\3\0\15\173\3\0\3\173\12\0\1\172\1\0"+
-    "\1\36\1\0\4\123\1\0\3\124\3\0\10\123\1\174"+
-    "\4\123\3\0\3\123\2\0\1\73\13\0\1\126\1\0"+
-    "\1\126\10\0\15\126\3\0\3\126\3\0\1\175\1\0"+
-    "\1\102\2\176\6\0\1\127\1\130\1\131\1\132\7\0"+
-    "\15\127\3\0\3\127\3\0\1\177\1\0\1\102\2\200"+
-    "\1\0\1\201\3\0\1\201\3\130\1\132\7\0\15\130"+
-    "\3\0\3\130\3\0\1\202\1\0\1\102\2\200\1\0"+
-    "\1\201\3\0\1\201\1\131\1\130\1\131\1\132\7\0"+
-    "\15\131\3\0\3\131\3\0\1\203\1\0\1\102\2\176"+
-    "\6\0\4\132\7\0\15\132\3\0\3\132\3\0\1\204"+
-    "\2\0\1\204\7\0\1\134\1\135\1\134\1\135\7\0"+
-    "\15\134\3\0\3\134\3\0\1\204\2\0\1\204\7\0"+
-    "\4\135\7\0\15\135\3\0\3\135\3\0\1\176\1\0"+
-    "\1\102\2\176\6\0\1\136\2\137\1\140\7\0\15\136"+
-    "\3\0\3\136\3\0\1\200\1\0\1\102\2\200\1\0"+
-    "\1\201\3\0\1\201\3\137\1\140\7\0\15\137\3\0"+
-    "\3\137\3\0\1\176\1\0\1\102\2\176\6\0\4\140"+
-    "\7\0\15\140\3\0\3\140\3\0\1\201\2\0\2\201"+
-    "\1\0\1\201\3\0\1\201\3\141\10\0\15\141\3\0"+
-    "\3\141\3\0\1\106\1\0\1\102\2\77\1\0\1\100"+
-    "\3\0\1\100\1\142\2\143\1\144\7\0\15\142\3\0"+
-    "\3\142\3\0\1\101\1\0\1\102\2\103\1\0\1\104"+
-    "\3\0\1\104\3\143\1\144\7\0\15\143\3\0\3\143"+
+    "\7\0\15\54\3\0\3\54\47\0\1\117\6\0\1\121"+
+    "\63\0\1\122\57\0\4\62\7\0\15\62\3\0\3\62"+
+    "\24\0\1\56\55\0\1\123\43\0\4\70\7\0\15\70"+
+    "\3\0\3\70\14\0\1\36\1\0\4\124\1\0\3\125"+
+    "\3\0\15\124\3\0\3\124\14\0\1\36\1\0\4\124"+
+    "\1\0\3\125\3\0\3\124\1\126\11\124\3\0\3\124"+
+    "\16\0\1\127\1\0\1\127\10\0\15\127\3\0\3\127"+
+    "\16\0\1\130\1\131\1\132\1\133\7\0\15\130\3\0"+
+    "\3\130\16\0\1\134\1\0\1\134\10\0\15\134\3\0"+
+    "\3\134\16\0\1\135\1\136\1\135\1\136\7\0\15\135"+
+    "\3\0\3\135\16\0\1\137\2\140\1\141\7\0\15\137"+
+    "\3\0\3\137\16\0\1\100\2\142\10\0\15\100\3\0"+
+    "\3\100\16\0\1\143\2\144\1\145\7\0\15\143\3\0"+
+    "\3\143\16\0\4\136\7\0\15\136\3\0\3\136\16\0"+
+    "\1\146\2\147\1\150\7\0\15\146\3\0\3\146\16\0"+
+    "\1\151\2\152\1\153\7\0\15\151\3\0\3\151\16\0"+
+    "\1\154\1\144\1\155\1\145\7\0\15\154\3\0\3\154"+
+    "\16\0\1\156\2\131\1\133\7\0\15\156\3\0\3\156"+
+    "\30\0\1\157\1\160\64\0\1\161\27\0\4\40\7\0"+
+    "\2\40\1\162\12\40\3\0\3\40\2\0\1\163\101\0"+
+    "\1\164\1\165\40\0\4\70\7\0\6\70\1\166\6\70"+
+    "\3\0\3\70\2\0\1\167\63\0\1\170\71\0\1\171"+
+    "\1\172\34\0\1\173\1\0\1\36\1\0\4\124\1\0"+
+    "\3\125\3\0\15\124\3\0\3\124\16\0\4\174\1\0"+
+    "\3\125\3\0\15\174\3\0\3\174\12\0\1\173\1\0"+
+    "\1\36\1\0\4\124\1\0\3\125\3\0\10\124\1\175"+
+    "\4\124\3\0\3\124\2\0\1\73\13\0\1\127\1\0"+
+    "\1\127\10\0\15\127\3\0\3\127\3\0\1\176\1\0"+
+    "\1\102\2\177\6\0\1\130\1\131\1\132\1\133\7\0"+
+    "\15\130\3\0\3\130\3\0\1\200\1\0\1\102\2\201"+
+    "\1\0\1\202\3\0\1\202\3\131\1\133\7\0\15\131"+
+    "\3\0\3\131\3\0\1\203\1\0\1\102\2\201\1\0"+
+    "\1\202\3\0\1\202\1\132\1\131\1\132\1\133\7\0"+
+    "\15\132\3\0\3\132\3\0\1\204\1\0\1\102\2\177"+
+    "\6\0\4\133\7\0\15\133\3\0\3\133\3\0\1\205"+
+    "\2\0\1\205\7\0\1\135\1\136\1\135\1\136\7\0"+
+    "\15\135\3\0\3\135\3\0\1\205\2\0\1\205\7\0"+
+    "\4\136\7\0\15\136\3\0\3\136\3\0\1\177\1\0"+
+    "\1\102\2\177\6\0\1\137\2\140\1\141\7\0\15\137"+
+    "\3\0\3\137\3\0\1\201\1\0\1\102\2\201\1\0"+
+    "\1\202\3\0\1\202\3\140\1\141\7\0\15\140\3\0"+
+    "\3\140\3\0\1\177\1\0\1\102\2\177\6\0\4\141"+
+    "\7\0\15\141\3\0\3\141\3\0\1\202\2\0\2\202"+
+    "\1\0\1\202\3\0\1\202\3\142\10\0\15\142\3\0"+
+    "\3\142\3\0\1\106\1\0\1\102\2\77\1\0\1\100"+
+    "\3\0\1\100\1\143\2\144\1\145\7\0\15\143\3\0"+
+    "\3\143\3\0\1\101\1\0\1\102\2\103\1\0\1\104"+
+    "\3\0\1\104\3\144\1\145\7\0\15\144\3\0\3\144"+
     "\3\0\1\106\1\0\1\102\2\77\1\0\1\100\3\0"+
-    "\1\100\4\144\7\0\15\144\3\0\3\144\3\0\1\77"+
-    "\1\0\1\102\2\77\1\0\1\100\3\0\1\100\1\145"+
-    "\2\146\1\147\7\0\15\145\3\0\3\145\3\0\1\103"+
-    "\1\0\1\102\2\103\1\0\1\104\3\0\1\104\3\146"+
-    "\1\147\7\0\15\146\3\0\3\146\3\0\1\77\1\0"+
-    "\1\102\2\77\1\0\1\100\3\0\1\100\4\147\7\0"+
-    "\15\147\3\0\3\147\3\0\1\100\2\0\2\100\1\0"+
-    "\1\100\3\0\1\100\1\150\2\151\1\152\7\0\15\150"+
-    "\3\0\3\150\3\0\1\104\2\0\2\104\1\0\1\104"+
-    "\3\0\1\104\3\151\1\152\7\0\15\151\3\0\3\151"+
+    "\1\100\4\145\7\0\15\145\3\0\3\145\3\0\1\77"+
+    "\1\0\1\102\2\77\1\0\1\100\3\0\1\100\1\146"+
+    "\2\147\1\150\7\0\15\146\3\0\3\146\3\0\1\103"+
+    "\1\0\1\102\2\103\1\0\1\104\3\0\1\104\3\147"+
+    "\1\150\7\0\15\147\3\0\3\147\3\0\1\77\1\0"+
+    "\1\102\2\77\1\0\1\100\3\0\1\100\4\150\7\0"+
+    "\15\150\3\0\3\150\3\0\1\100\2\0\2\100\1\0"+
+    "\1\100\3\0\1\100\1\151\2\152\1\153\7\0\15\151"+
+    "\3\0\3\151\3\0\1\104\2\0\2\104\1\0\1\104"+
+    "\3\0\1\104\3\152\1\153\7\0\15\152\3\0\3\152"+
     "\3\0\1\100\2\0\2\100\1\0\1\100\3\0\1\100"+
-    "\4\152\7\0\15\152\3\0\3\152\3\0\1\205\1\0"+
-    "\1\102\2\77\1\0\1\100\3\0\1\100\1\153\1\143"+
-    "\1\154\1\144\7\0\15\153\3\0\3\153\3\0\1\206"+
-    "\1\0\1\102\2\103\1\0\1\104\3\0\1\104\1\154"+
-    "\1\143\1\154\1\144\7\0\15\154\3\0\3\154\3\0"+
-    "\1\203\1\0\1\102\2\176\6\0\1\155\2\130\1\132"+
-    "\7\0\15\155\3\0\3\155\31\0\1\157\54\0\1\207"+
-    "\64\0\1\210\26\0\4\40\7\0\15\40\3\0\1\40"+
-    "\1\211\1\40\31\0\1\164\54\0\1\212\35\0\1\36"+
-    "\1\0\4\123\1\0\3\124\3\0\3\123\1\213\11\123"+
-    "\3\0\3\123\2\0\1\214\102\0\1\171\54\0\1\215"+
-    "\34\0\1\216\52\0\1\172\3\0\4\173\7\0\15\173"+
-    "\3\0\3\173\12\0\1\172\1\0\1\217\1\0\4\123"+
-    "\1\0\3\124\3\0\15\123\3\0\3\123\16\0\1\220"+
-    "\1\132\1\220\1\132\7\0\15\220\3\0\3\220\16\0"+
-    "\4\140\7\0\15\140\3\0\3\140\16\0\4\144\7\0"+
-    "\15\144\3\0\3\144\16\0\4\147\7\0\15\147\3\0"+
-    "\3\147\16\0\4\152\7\0\15\152\3\0\3\152\16\0"+
-    "\1\221\1\144\1\221\1\144\7\0\15\221\3\0\3\221"+
-    "\16\0\4\132\7\0\15\132\3\0\3\132\16\0\4\222"+
-    "\7\0\15\222\3\0\3\222\33\0\1\223\61\0\1\224"+
-    "\30\0\4\40\6\0\1\225\15\40\3\0\2\40\1\226"+
-    "\33\0\1\227\32\0\1\172\1\0\1\36\1\0\4\123"+
-    "\1\0\3\124\3\0\10\123\1\230\4\123\3\0\3\123"+
-    "\2\0\1\231\104\0\1\232\36\0\4\233\7\0\15\233"+
-    "\3\0\3\233\3\0\1\175\1\0\1\102\2\176\6\0"+
-    "\1\220\1\132\1\220\1\132\7\0\15\220\3\0\3\220"+
-    "\3\0\1\205\1\0\1\102\2\77\1\0\1\100\3\0"+
-    "\1\100\1\221\1\144\1\221\1\144\7\0\15\221\3\0"+
-    "\3\221\3\0\1\204\2\0\1\204\7\0\4\222\7\0"+
-    "\15\222\3\0\3\222\34\0\1\234\55\0\1\235\26\0"+
-    "\1\236\60\0\4\40\6\0\1\225\15\40\3\0\3\40"+
-    "\34\0\1\237\31\0\1\172\1\0\1\111\1\0\4\123"+
-    "\1\0\3\124\3\0\15\123\3\0\3\123\34\0\1\240"+
-    "\32\0\1\241\2\0\4\233\7\0\15\233\3\0\3\233"+
-    "\35\0\1\242\62\0\1\243\20\0\1\244\77\0\1\245"+
-    "\53\0\1\246\32\0\1\36\1\0\4\173\1\0\3\124"+
-    "\3\0\15\173\3\0\3\173\36\0\1\247\53\0\1\250"+
-    "\33\0\4\251\7\0\15\251\3\0\3\251\36\0\1\252"+
-    "\53\0\1\253\54\0\1\254\61\0\1\255\11\0\1\256"+
-    "\12\0\4\251\7\0\15\251\3\0\3\251\37\0\1\257"+
-    "\53\0\1\260\54\0\1\261\22\0\1\13\62\0\4\262"+
-    "\7\0\15\262\3\0\3\262\40\0\1\263\53\0\1\264"+
-    "\43\0\1\265\26\0\2\262\1\0\2\262\1\0\2\262"+
-    "\2\0\5\262\7\0\15\262\3\0\4\262\27\0\1\266"+
-    "\53\0\1\267\24\0";
+    "\4\153\7\0\15\153\3\0\3\153\3\0\1\206\1\0"+
+    "\1\102\2\77\1\0\1\100\3\0\1\100\1\154\1\144"+
+    "\1\155\1\145\7\0\15\154\3\0\3\154\3\0\1\207"+
+    "\1\0\1\102\2\103\1\0\1\104\3\0\1\104\1\155"+
+    "\1\144\1\155\1\145\7\0\15\155\3\0\3\155\3\0"+
+    "\1\204\1\0\1\102\2\177\6\0\1\156\2\131\1\133"+
+    "\7\0\15\156\3\0\3\156\31\0\1\160\54\0\1\210"+
+    "\64\0\1\211\26\0\4\40\7\0\15\40\3\0\1\40"+
+    "\1\212\1\40\31\0\1\165\54\0\1\213\35\0\1\36"+
+    "\1\0\4\124\1\0\3\125\3\0\3\124\1\214\11\124"+
+    "\3\0\3\124\2\0\1\215\102\0\1\172\54\0\1\216"+
+    "\34\0\1\217\52\0\1\173\3\0\4\174\7\0\15\174"+
+    "\3\0\3\174\12\0\1\173\1\0\1\220\1\0\4\124"+
+    "\1\0\3\125\3\0\15\124\3\0\3\124\16\0\1\221"+
+    "\1\133\1\221\1\133\7\0\15\221\3\0\3\221\16\0"+
+    "\4\141\7\0\15\141\3\0\3\141\16\0\4\145\7\0"+
+    "\15\145\3\0\3\145\16\0\4\150\7\0\15\150\3\0"+
+    "\3\150\16\0\4\153\7\0\15\153\3\0\3\153\16\0"+
+    "\1\222\1\145\1\222\1\145\7\0\15\222\3\0\3\222"+
+    "\16\0\4\133\7\0\15\133\3\0\3\133\16\0\4\223"+
+    "\7\0\15\223\3\0\3\223\33\0\1\224\61\0\1\225"+
+    "\30\0\4\40\6\0\1\226\15\40\3\0\2\40\1\227"+
+    "\33\0\1\230\32\0\1\173\1\0\1\36\1\0\4\124"+
+    "\1\0\3\125\3\0\10\124\1\231\4\124\3\0\3\124"+
+    "\2\0\1\232\104\0\1\233\36\0\4\234\7\0\15\234"+
+    "\3\0\3\234\3\0\1\176\1\0\1\102\2\177\6\0"+
+    "\1\221\1\133\1\221\1\133\7\0\15\221\3\0\3\221"+
+    "\3\0\1\206\1\0\1\102\2\77\1\0\1\100\3\0"+
+    "\1\100\1\222\1\145\1\222\1\145\7\0\15\222\3\0"+
+    "\3\222\3\0\1\205\2\0\1\205\7\0\4\223\7\0"+
+    "\15\223\3\0\3\223\34\0\1\235\55\0\1\236\26\0"+
+    "\1\237\60\0\4\40\6\0\1\226\15\40\3\0\3\40"+
+    "\34\0\1\240\31\0\1\173\1\0\1\117\1\0\4\124"+
+    "\1\0\3\125\3\0\15\124\3\0\3\124\34\0\1\241"+
+    "\32\0\1\242\2\0\4\234\7\0\15\234\3\0\3\234"+
+    "\35\0\1\243\62\0\1\244\20\0\1\245\77\0\1\246"+
+    "\53\0\1\247\32\0\1\36\1\0\4\174\1\0\3\125"+
+    "\3\0\15\174\3\0\3\174\36\0\1\250\53\0\1\251"+
+    "\33\0\4\252\7\0\15\252\3\0\3\252\36\0\1\253"+
+    "\53\0\1\254\54\0\1\255\61\0\1\256\11\0\1\257"+
+    "\12\0\4\252\7\0\15\252\3\0\3\252\37\0\1\260"+
+    "\53\0\1\261\54\0\1\262\22\0\1\13\62\0\4\263"+
+    "\7\0\15\263\3\0\3\263\40\0\1\264\53\0\1\265"+
+    "\43\0\1\266\26\0\2\263\1\0\2\263\1\0\2\263"+
+    "\2\0\5\263\7\0\15\263\3\0\4\263\27\0\1\267"+
+    "\53\0\1\270\24\0";
 
   private static int [] zzUnpackTrans() {
     int [] result = new int[7040];
@@ -359,15 +360,15 @@ class WikipediaTokenizerImpl {
   private static final String ZZ_ATTRIBUTE_PACKED_0 =
     "\12\0\1\11\7\1\1\11\3\1\1\11\6\1\1\11"+
     "\2\1\1\11\14\1\1\11\6\1\2\11\3\0\1\11"+
-    "\14\0\2\1\2\11\1\1\1\0\2\1\1\0\1\1"+
-    "\1\0\1\1\3\0\7\1\2\0\1\1\1\0\15\1"+
-    "\3\0\1\1\1\11\3\0\1\1\1\11\5\0\1\1"+
-    "\4\0\1\1\2\0\2\1\2\0\1\1\5\0\1\11"+
-    "\3\1\3\0\1\1\2\0\1\11\30\0\1\1\2\0"+
-    "\3\11";
+    "\14\0\2\1\2\11\1\1\1\0\2\1\1\11\1\0"+
+    "\1\1\1\0\1\1\3\0\7\1\2\0\1\1\1\0"+
+    "\15\1\3\0\1\1\1\11\3\0\1\1\1\11\5\0"+
+    "\1\1\4\0\1\1\2\0\2\1\2\0\1\1\5\0"+
+    "\1\11\3\1\3\0\1\1\2\0\1\11\30\0\1\1"+
+    "\2\0\3\11";
 
   private static int [] zzUnpackAttribute() {
-    int [] result = new int[183];
+    int [] result = new int[184];
     int offset = 0;
     offset = zzUnpackAttribute(ZZ_ATTRIBUTE_PACKED_0, offset, result);
     return result;
@@ -809,186 +810,190 @@ final int setText(StringBuilder buffer){
       zzMarkedPos = zzMarkedPosL;
 
       switch (zzAction < 0 ? zzAction : ZZ_ACTION[zzAction]) {
-        case 16: 
-          { currentTokType = HEADING; yybegin(DOUBLE_EQUALS_STATE); numWikiTokensSeen++; return currentTokType;
-          }
-        case 46: break;
-        case 39: 
-          { positionInc = 1; return ACRONYM;
+        case 44: 
+          { numWikiTokensSeen = 0; positionInc = 1; currentTokType = CATEGORY; yybegin(CATEGORY_STATE);/* Break so we don't hit fall-through warning: */ break;
           }
         case 47: break;
-        case 8: 
-          { /* ignore */
+        case 37: 
+          { currentTokType = BOLD_ITALICS;  yybegin(FIVE_SINGLE_QUOTES_STATE); /* Break so we don't hit fall-through warning: */ break;
           }
         case 48: break;
-        case 20: 
-          { numBalanced = 0; numWikiTokensSeen = 0; currentTokType = EXTERNAL_LINK;yybegin(EXTERNAL_LINK_STATE);
+        case 16: 
+          { currentTokType = HEADING; yybegin(DOUBLE_EQUALS_STATE); numWikiTokensSeen++; return currentTokType;
           }
         case 49: break;
-        case 35: 
-          { positionInc = 1; return COMPANY;
+        case 20: 
+          { numBalanced = 0; numWikiTokensSeen = 0; currentTokType = EXTERNAL_LINK;yybegin(EXTERNAL_LINK_STATE); /* Break so we don't hit fall-through warning: */ break;
           }
         case 50: break;
-        case 4: 
-          { numWikiTokensSeen = 0; positionInc = 1; currentTokType = EXTERNAL_LINK_URL; yybegin(EXTERNAL_LINK_STATE);
+        case 40: 
+          { positionInc = 1; return ACRONYM;
           }
         case 51: break;
-        case 25: 
-          { numWikiTokensSeen = 0; positionInc = 1; currentTokType = CITATION; yybegin(DOUBLE_BRACE_STATE);
+        case 5: 
+          { positionInc = 1; /* Break so we don't hit fall-through warning: */ break;
           }
         case 52: break;
-        case 43: 
-          { numWikiTokensSeen = 0; positionInc = 1; currentTokType = CATEGORY; yybegin(CATEGORY_STATE);
+        case 36: 
+          { positionInc = 1; return COMPANY;
           }
         case 53: break;
-        case 22: 
-          { numWikiTokensSeen = 0; positionInc = 1; if (numBalanced == 0){numBalanced++;yybegin(TWO_SINGLE_QUOTES_STATE);} else{numBalanced = 0;}
+        case 10: 
+          { numLinkToks = 0; positionInc = 0; yybegin(YYINITIAL); /* Break so we don't hit fall-through warning: */ break;
           }
         case 54: break;
-        case 34: 
-          { positionInc = 1; return NUM;
+        case 15: 
+          { currentTokType = SUB_HEADING; numWikiTokensSeen = 0; yybegin(STRING); /* Break so we don't hit fall-through warning: */ break;
           }
         case 55: break;
-        case 32: 
-          { positionInc = 1; return APOSTROPHE;
+        case 22: 
+          { numWikiTokensSeen = 0; positionInc = 1; if (numBalanced == 0){numBalanced++;yybegin(TWO_SINGLE_QUOTES_STATE);} else{numBalanced = 0;}/* Break so we don't hit fall-through warning: */ break;
           }
         case 56: break;
-        case 23: 
-          { numWikiTokensSeen = 0; positionInc = 1; yybegin(DOUBLE_EQUALS_STATE);
+        case 35: 
+          { positionInc = 1; return NUM;
           }
         case 57: break;
-        case 21: 
-          { yybegin(STRING); return currentTokType;/*pipe*/
+        case 33: 
+          { positionInc = 1; return APOSTROPHE;
           }
         case 58: break;
-        case 2: 
-          { positionInc = 1; return ALPHANUM;
+        case 21: 
+          { yybegin(STRING); return currentTokType;/*pipe*/
           }
         case 59: break;
-        case 29: 
-          { currentTokType = INTERNAL_LINK; numWikiTokensSeen = 0;  yybegin(INTERNAL_LINK_STATE);
+        case 18: 
+          { /* Break so we don't hit fall-through warning: */ break;/* ignore STRING */
           }
         case 60: break;
-        case 17: 
-          { yybegin(DOUBLE_BRACE_STATE); numWikiTokensSeen = 0; return currentTokType;
+        case 2: 
+          { positionInc = 1; return ALPHANUM;
           }
         case 61: break;
-        case 44: 
-          { currentTokType = CATEGORY; numWikiTokensSeen = 0; yybegin(CATEGORY_STATE);
+        case 1: 
+          { numWikiTokensSeen = 0;  positionInc = 1; /* Break so we don't hit fall-through warning: */ break;
           }
         case 62: break;
-        case 26: 
-          { yybegin(YYINITIAL);
+        case 17: 
+          { yybegin(DOUBLE_BRACE_STATE); numWikiTokensSeen = 0; return currentTokType;
           }
         case 63: break;
-        case 3: 
-          { positionInc = 1; return CJ;
+        case 39: 
+          { numBalanced = 0;currentTokType = ALPHANUM; yybegin(YYINITIAL); /* Break so we don't hit fall-through warning: */ break;/*end sub header*/
           }
         case 64: break;
-        case 38: 
-          { numBalanced = 0;currentTokType = ALPHANUM; yybegin(YYINITIAL);/*end sub header*/
+        case 29: 
+          { currentTokType = INTERNAL_LINK; numWikiTokensSeen = 0;  yybegin(INTERNAL_LINK_STATE); /* Break so we don't hit fall-through warning: */ break;
           }
         case 65: break;
-        case 15: 
-          { currentTokType = SUB_HEADING; numWikiTokensSeen = 0; yybegin(STRING);
+        case 46: 
+          { numBalanced = 0; numWikiTokensSeen = 0; currentTokType = CATEGORY;yybegin(CATEGORY_STATE); /* Break so we don't hit fall-through warning: */ break;
           }
         case 66: break;
-        case 30: 
-          { numBalanced = 0;currentTokType = ALPHANUM; yybegin(YYINITIAL);/*end italics*/
+        case 27: 
+          { numLinkToks = 0; yybegin(YYINITIAL); /* Break so we don't hit fall-through warning: */ break;
           }
         case 67: break;
-        case 6: 
-          { yybegin(CATEGORY_STATE); numWikiTokensSeen++; return currentTokType;
+        case 4: 
+          { numWikiTokensSeen = 0; positionInc = 1; currentTokType = EXTERNAL_LINK_URL; yybegin(EXTERNAL_LINK_STATE);/* Break so we don't hit fall-through warning: */ break;
           }
         case 68: break;
-        case 5: 
-          { positionInc = 1;
+        case 38: 
+          { numBalanced = 0;currentTokType = ALPHANUM;yybegin(YYINITIAL); /* Break so we don't hit fall-through warning: */ break;/*end bold*/
           }
         case 69: break;
-        case 19: 
-          { yybegin(STRING); numWikiTokensSeen++; return currentTokType;/* STRING ALPHANUM*/
+        case 13: 
+          { currentTokType = EXTERNAL_LINK; numWikiTokensSeen = 0; yybegin(EXTERNAL_LINK_STATE); /* Break so we don't hit fall-through warning: */ break;
           }
         case 70: break;
-        case 42: 
-          { positionInc = 1; numWikiTokensSeen++; yybegin(EXTERNAL_LINK_STATE); return currentTokType;
+        case 3: 
+          { positionInc = 1; return CJ;
           }
         case 71: break;
-        case 27: 
-          { numLinkToks = 0; yybegin(YYINITIAL);
+        case 45: 
+          { currentTokType = CATEGORY; numWikiTokensSeen = 0; yybegin(CATEGORY_STATE); /* Break so we don't hit fall-through warning: */ break;
           }
         case 72: break;
-        case 11: 
-          { currentTokType = BOLD;  yybegin(THREE_SINGLE_QUOTES_STATE);
+        case 6: 
+          { yybegin(CATEGORY_STATE); numWikiTokensSeen++; return currentTokType;
           }
         case 73: break;
-        case 13: 
-          { currentTokType = EXTERNAL_LINK; numWikiTokensSeen = 0; yybegin(EXTERNAL_LINK_STATE);
+        case 11: 
+          { currentTokType = BOLD;  yybegin(THREE_SINGLE_QUOTES_STATE); /* Break so we don't hit fall-through warning: */ break;
           }
         case 74: break;
-        case 14: 
-          { yybegin(STRING); numWikiTokensSeen++; return currentTokType;
+        case 25: 
+          { numWikiTokensSeen = 0; positionInc = 1; currentTokType = CITATION; yybegin(DOUBLE_BRACE_STATE);/* Break so we don't hit fall-through warning: */ break;
           }
         case 75: break;
-        case 45: 
-          { numBalanced = 0; numWikiTokensSeen = 0; currentTokType = CATEGORY;yybegin(CATEGORY_STATE);
+        case 8: 
+          { /* Break so we don't hit fall-through warning: */ break;/* ignore */
           }
         case 76: break;
-        case 28: 
-          { currentTokType = INTERNAL_LINK; numWikiTokensSeen = 0; yybegin(INTERNAL_LINK_STATE);
+        case 19: 
+          { yybegin(STRING); numWikiTokensSeen++; return currentTokType;/* STRING ALPHANUM*/
           }
         case 77: break;
-        case 37: 
-          { numBalanced = 0;currentTokType = ALPHANUM;yybegin(YYINITIAL);/*end bold*/
+        case 43: 
+          { positionInc = 1; numWikiTokensSeen++; yybegin(EXTERNAL_LINK_STATE); return currentTokType;
           }
         case 78: break;
-        case 9: 
-          { if (numLinkToks == 0){positionInc = 0;} else{positionInc = 1;} numWikiTokensSeen++; currentTokType = EXTERNAL_LINK; yybegin(EXTERNAL_LINK_STATE); numLinkToks++; return currentTokType;
+        case 42: 
+          { numBalanced = 0;currentTokType = ALPHANUM; yybegin(YYINITIAL); /* Break so we don't hit fall-through warning: */ break;/*end bold italics*/
           }
         case 79: break;
-        case 7: 
-          { yybegin(INTERNAL_LINK_STATE); numWikiTokensSeen++; return currentTokType;
+        case 30: 
+          { yybegin(YYINITIAL); /* Break so we don't hit fall-through warning: */ break;
           }
         case 80: break;
-        case 24: 
-          { numWikiTokensSeen = 0; positionInc = 1; currentTokType = INTERNAL_LINK; yybegin(INTERNAL_LINK_STATE);
+        case 14: 
+          { yybegin(STRING); numWikiTokensSeen++; return currentTokType;
           }
         case 81: break;
-        case 40: 
-          { positionInc = 1; return EMAIL;
+        case 9: 
+          { if (numLinkToks == 0){positionInc = 0;} else{positionInc = 1;} numWikiTokensSeen++; currentTokType = EXTERNAL_LINK; yybegin(EXTERNAL_LINK_STATE); numLinkToks++; return currentTokType;
           }
         case 82: break;
-        case 1: 
-          { numWikiTokensSeen = 0;  positionInc = 1;
+        case 7: 
+          { yybegin(INTERNAL_LINK_STATE); numWikiTokensSeen++; return currentTokType;
           }
         case 83: break;
-        case 18: 
-          { /* ignore STRING */
+        case 41: 
+          { positionInc = 1; return EMAIL;
           }
         case 84: break;
-        case 36: 
-          { currentTokType = BOLD_ITALICS;  yybegin(FIVE_SINGLE_QUOTES_STATE);
+        case 28: 
+          { currentTokType = INTERNAL_LINK; numWikiTokensSeen = 0; yybegin(INTERNAL_LINK_STATE); /* Break so we don't hit fall-through warning: */ break;
           }
         case 85: break;
-        case 33: 
-          { positionInc = 1; return HOST;
+        case 23: 
+          { numWikiTokensSeen = 0; positionInc = 1; yybegin(DOUBLE_EQUALS_STATE);/* Break so we don't hit fall-through warning: */ break;
           }
         case 86: break;
-        case 31: 
-          { numBalanced = 0; numWikiTokensSeen = 0; currentTokType = INTERNAL_LINK;yybegin(INTERNAL_LINK_STATE);
+        case 34: 
+          { positionInc = 1; return HOST;
           }
         case 87: break;
-        case 41: 
-          { numBalanced = 0;currentTokType = ALPHANUM; yybegin(YYINITIAL);/*end bold italics*/
+        case 32: 
+          { numBalanced = 0; numWikiTokensSeen = 0; currentTokType = INTERNAL_LINK;yybegin(INTERNAL_LINK_STATE); /* Break so we don't hit fall-through warning: */ break;
           }
         case 88: break;
         case 12: 
           { currentTokType = ITALICS; numWikiTokensSeen++;  yybegin(STRING); return currentTokType;/*italics*/
           }
         case 89: break;
-        case 10: 
-          { numLinkToks = 0; positionInc = 0; yybegin(YYINITIAL);
+        case 24: 
+          { numWikiTokensSeen = 0; positionInc = 1; currentTokType = INTERNAL_LINK; yybegin(INTERNAL_LINK_STATE);/* Break so we don't hit fall-through warning: */ break;
           }
         case 90: break;
+        case 31: 
+          { numBalanced = 0;currentTokType = ALPHANUM; yybegin(YYINITIAL); /* Break so we don't hit fall-through warning: */ break;/*end italics*/
+          }
+        case 91: break;
+        case 26: 
+          { yybegin(YYINITIAL);/* Break so we don't hit fall-through warning: */ break;
+          }
+        case 92: break;
         default: 
           if (zzInput == YYEOF && zzStartRead == zzCurrentPos) {
             zzAtEOF = true;