Posted to commits@lucene.apache.org by us...@apache.org on 2013/07/08 19:55:49 UTC

svn commit: r1500862 [2/2] - in /lucene/dev/trunk: lucene/ lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/ lucene/analysis/common/src/test/org/apache/lucene/analysis/core/ lucene/analysis/common/src/test/org/apache/lucene/analysis/m...
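
[Note: this part of the commit mechanically updates call sites across lucene/ and solr/ to use the new Analyzer.tokenStream(String, String) convenience overload instead of wrapping plain strings in a java.io.StringReader. A minimal sketch of the pattern being applied, assuming the Lucene 4.x analysis API; the TokenStreamSketch class and printTokens helper are illustrative, not part of the commit:]

    import java.io.IOException;

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

    final class TokenStreamSketch {
      // Prints the terms an analyzer produces for the given text.
      static void printTokens(Analyzer analyzer, String field, String text) throws IOException {
        // Before: TokenStream ts = analyzer.tokenStream(field, new StringReader(text));
        TokenStream ts = analyzer.tokenStream(field, text); // String overload, no StringReader
        CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
        ts.reset();                      // consumers must reset() before incrementToken()
        while (ts.incrementToken()) {
          System.out.println(termAtt.toString());
        }
        ts.end();
        ts.close();
      }
    }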

Modified: lucene/dev/trunk/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (original)
+++ lucene/dev/trunk/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java Mon Jul  8 17:55:48 2013
@@ -20,7 +20,6 @@ package org.apache.lucene.search.highlig
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.io.Reader;
-import java.io.StringReader;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -248,7 +247,7 @@ public class HighlighterTest extends Bas
   private String highlightField(Query query, String fieldName, String text)
       throws IOException, InvalidTokenOffsetsException {
     TokenStream tokenStream = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET)
-        .tokenStream(fieldName, new StringReader(text));
+        .tokenStream(fieldName, text);
     // Assuming "<B>", "</B>" used to highlight
     SimpleHTMLFormatter formatter = new SimpleHTMLFormatter();
     QueryScorer scorer = new QueryScorer(query, fieldName, FIELD_NAME);
@@ -269,8 +268,7 @@ public class HighlighterTest extends Bas
     
     for (int i = 0; i < hits.totalHits; i++) {
       String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME,
-          new StringReader(text));
+      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text);
       highlighter.setTextFragmenter(new SimpleFragmenter(40));
 
       String result = highlighter.getBestFragments(tokenStream, text, maxNumFragmentsRequired,
@@ -348,7 +346,7 @@ public class HighlighterTest extends Bas
     
     for (int i = 0; i < hits.totalHits; i++) {
       String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text);
 
       highlighter.setTextFragmenter(new SimpleFragmenter(40));
 
@@ -377,7 +375,7 @@ public class HighlighterTest extends Bas
     
     for (int i = 0; i < hits.totalHits; i++) {
       String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text);
 
       highlighter.setTextFragmenter(new SimpleFragmenter(40));
 
@@ -406,7 +404,7 @@ public class HighlighterTest extends Bas
     
     for (int i = 0; i < hits.totalHits; i++) {
       String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text);
 
       highlighter.setTextFragmenter(new SimpleFragmenter(40));
 
@@ -431,7 +429,7 @@ public class HighlighterTest extends Bas
     
     for (int i = 0; i < hits.totalHits; i++) {
       String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text);
 
       highlighter.setTextFragmenter(new SimpleFragmenter(40));
 
@@ -455,7 +453,7 @@ public class HighlighterTest extends Bas
     
     for (int i = 0; i < hits.totalHits; i++) {
       String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text);
 
       highlighter.setTextFragmenter(new SimpleFragmenter(40));
 
@@ -479,7 +477,7 @@ public class HighlighterTest extends Bas
     
     for (int i = 0; i < hits.totalHits; i++) {
       String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text);
 
       highlighter.setTextFragmenter(new SimpleFragmenter(40));
 
@@ -505,7 +503,7 @@ public class HighlighterTest extends Bas
     
     for (int i = 0; i < hits.totalHits; i++) {
       String text = searcher.doc(hits.scoreDocs[i].doc).getField(NUMERIC_FIELD_NAME).numericValue().toString();
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text);
 
       highlighter.setTextFragmenter(new SimpleFragmenter(40));
 
@@ -533,7 +531,7 @@ public class HighlighterTest extends Bas
     
     for (int i = 0; i < hits.totalHits; i++) {
       String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text);
 
       String result = highlighter.getBestFragments(tokenStream, text, maxNumFragmentsRequired,
           "...");
@@ -555,7 +553,7 @@ public class HighlighterTest extends Bas
 
     for (int i = 0; i < hits.totalHits; i++) {
       String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text);
       QueryScorer scorer = new QueryScorer(query, FIELD_NAME);
       Highlighter highlighter = new Highlighter(this, scorer);
 
@@ -585,7 +583,7 @@ public class HighlighterTest extends Bas
   
     for (int i = 0; i < hits.totalHits; i++) {
       String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text);
 
       highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer, 5));
 
@@ -608,7 +606,7 @@ public class HighlighterTest extends Bas
 
     for (int i = 0; i < hits.totalHits; i++) {
       String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text);
 
       highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer, 20));
 
@@ -639,7 +637,7 @@ public class HighlighterTest extends Bas
     
     for (int i = 0; i < hits.totalHits; i++) {
       String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text);
 
       highlighter.setTextFragmenter(new SimpleFragmenter(40));
 
@@ -710,7 +708,7 @@ public class HighlighterTest extends Bas
     int maxNumFragmentsRequired = 2;
     for (int i = 0; i < hits.totalHits; i++) {
       String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text);
 
       String result = highlighter.getBestFragments(tokenStream, text, maxNumFragmentsRequired,
           "...");
@@ -907,7 +905,7 @@ public class HighlighterTest extends Bas
       int maxNumFragmentsRequired = 2;
       String fragmentSeparator = "...";
       QueryScorer scorer = new QueryScorer(query, HighlighterTest.FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(HighlighterTest.FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.tokenStream(HighlighterTest.FIELD_NAME, text);
 
       Highlighter highlighter = new Highlighter(this, scorer);
 
@@ -931,7 +929,7 @@ public class HighlighterTest extends Bas
       int maxNumFragmentsRequired = 2;
       String fragmentSeparator = "...";
       QueryScorer scorer = new QueryScorer(query, null);
-      TokenStream tokenStream = analyzer.tokenStream(HighlighterTest.FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.tokenStream(HighlighterTest.FIELD_NAME, text);
 
       Highlighter highlighter = new Highlighter(this, scorer);
 
@@ -955,7 +953,7 @@ public class HighlighterTest extends Bas
       int maxNumFragmentsRequired = 2;
       String fragmentSeparator = "...";
       QueryScorer scorer = new QueryScorer(query, "random_field", HighlighterTest.FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(HighlighterTest.FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.tokenStream(HighlighterTest.FIELD_NAME, text);
 
       Highlighter highlighter = new Highlighter(this, scorer);
 
@@ -1126,7 +1124,7 @@ public class HighlighterTest extends Bas
         numHighlights = 0;
         for (int i = 0; i < hits.totalHits; i++) {
           String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-          TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+          TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text);
 
           Highlighter highlighter = getHighlighter(query, FIELD_NAME,
               HighlighterTest.this);
@@ -1187,7 +1185,7 @@ public class HighlighterTest extends Bas
         Highlighter highlighter = getHighlighter(wTerms, HighlighterTest.this);// new
         // Highlighter(new
         // QueryTermScorer(wTerms));
-        TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(texts[0]));
+        TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, texts[0]);
         highlighter.setTextFragmenter(new SimpleFragmenter(2));
 
         String result = highlighter.getBestFragment(tokenStream, texts[0]).trim();
@@ -1196,7 +1194,7 @@ public class HighlighterTest extends Bas
 
         // readjust weights
         wTerms[1].setWeight(50f);
-        tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(texts[0]));
+        tokenStream = analyzer.tokenStream(FIELD_NAME, texts[0]);
         highlighter = getHighlighter(wTerms, HighlighterTest.this);
         highlighter.setTextFragmenter(new SimpleFragmenter(2));
 
@@ -1232,7 +1230,7 @@ public class HighlighterTest extends Bas
         Highlighter highlighter = getHighlighter(query, null, HighlighterTest.this);
 
         // Get 3 best fragments and separate with a "..."
-        TokenStream tokenStream = analyzer.tokenStream(null, new StringReader(s));
+        TokenStream tokenStream = analyzer.tokenStream(null, s);
 
         String result = highlighter.getBestFragments(tokenStream, s, 3, "...");
         String expectedResult = "<B>football</B>-<B>soccer</B> in the euro 2004 <B>footie</B> competition";
@@ -1257,7 +1255,7 @@ public class HighlighterTest extends Bas
 
         for (int i = 0; i < hits.totalHits; i++) {
           String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-          TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+          TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text);
           Highlighter highlighter = getHighlighter(query, FIELD_NAME,
               HighlighterTest.this);
           String result = highlighter.getBestFragment(tokenStream, text);
@@ -1280,7 +1278,7 @@ public class HighlighterTest extends Bas
 
         for (int i = 0; i < hits.totalHits; i++) {
           String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-          TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+          TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text);
 
           Highlighter highlighter = getHighlighter(query, FIELD_NAME,
               HighlighterTest.this);// new Highlighter(this, new
@@ -1288,7 +1286,7 @@ public class HighlighterTest extends Bas
           highlighter.setTextFragmenter(new SimpleFragmenter(20));
           String stringResults[] = highlighter.getBestFragments(tokenStream, text, 10);
 
-          tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+          tokenStream = analyzer.tokenStream(FIELD_NAME, text);
           TextFragment fragmentResults[] = highlighter.getBestTextFragments(tokenStream, text,
               true, 10);
 
@@ -1318,7 +1316,7 @@ public class HighlighterTest extends Bas
       public void run() throws Exception {
         numHighlights = 0;
         doSearching(new TermQuery(new Term(FIELD_NAME, "meat")));
-        TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(texts[0]));
+        TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, texts[0]);
         Highlighter highlighter = getHighlighter(query, FIELD_NAME,
             HighlighterTest.this);// new Highlighter(this, new
         // QueryTermScorer(query));
@@ -1432,7 +1430,7 @@ public class HighlighterTest extends Bas
 
         for (int i = 0; i < hits.totalHits; i++) {
           String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-          TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+          TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text);
           Highlighter highlighter = getHighlighter(query, FIELD_NAME, HighlighterTest.this, false);
 
           highlighter.setTextFragmenter(new SimpleFragmenter(40));
@@ -1461,7 +1459,7 @@ public class HighlighterTest extends Bas
         doSearching(new TermQuery(new Term(FIELD_NAME, "aninvalidquerywhichshouldyieldnoresults")));
 
         for (String text : texts) {
-          TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+          TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text);
           Highlighter highlighter = getHighlighter(query, FIELD_NAME,
               HighlighterTest.this);
           String result = highlighter.getBestFragment(tokenStream, text);
@@ -1503,7 +1501,7 @@ public class HighlighterTest extends Bas
       }
     });
     highlighter.setTextFragmenter(new SimpleFragmenter(2000));
-    TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(rawDocContent));
+    TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, rawDocContent);
 
     String encodedSnippet = highlighter.getBestFragments(tokenStream, rawDocContent, 1, "");
     // An ugly bit of XML creation:
@@ -1828,7 +1826,7 @@ public class HighlighterTest extends Bas
    * 
    * for (int i = 0; i < hits.totalHits; i++) { String text =
    * searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream
-   * tokenStream=bigramAnalyzer.tokenStream(FIELD_NAME,new StringReader(text));
+   * tokenStream=bigramAnalyzer.tokenStream(FIELD_NAME,text);
    * String highlightedText = highlighter.getBestFragment(tokenStream,text);
    * System.out.println(highlightedText); } }
    */
@@ -1855,7 +1853,7 @@ public class HighlighterTest extends Bas
       final int expectedHighlights) throws Exception {
     for (int i = 0; i < hits.totalHits; i++) {
       String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text);
       QueryScorer scorer = new QueryScorer(query, FIELD_NAME);
       Highlighter highlighter = new Highlighter(this, scorer);
 
@@ -2104,7 +2102,7 @@ final class SynonymTokenizer extends Tok
         int maxNumFragmentsRequired = 2;
         String fragmentSeparator = "...";
         Scorer scorer = null;
-        TokenStream tokenStream = analyzer.tokenStream(HighlighterTest.FIELD_NAME, new StringReader(text));
+        TokenStream tokenStream = analyzer.tokenStream(HighlighterTest.FIELD_NAME, text);
         if (mode == QUERY) {
           scorer = new QueryScorer(query);
         } else if (mode == QUERY_TERM) {
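
[Note: the HighlighterTest hunks above all follow one shape. A minimal sketch of that shape under the new overload, assuming the Lucene 4.x highlighter API; variable names are illustrative:]

    TokenStream ts = analyzer.tokenStream(FIELD_NAME, text); // was: new StringReader(text)
    QueryScorer scorer = new QueryScorer(query, FIELD_NAME);
    Highlighter highlighter = new Highlighter(new SimpleHTMLFormatter(), scorer);
    highlighter.setTextFragmenter(new SimpleFragmenter(40));
    String fragments = highlighter.getBestFragments(ts, text, 2, "...");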

Modified: lucene/dev/trunk/lucene/highlighter/src/test/org/apache/lucene/search/highlight/custom/HighlightCustomQueryTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/highlighter/src/test/org/apache/lucene/search/highlight/custom/HighlightCustomQueryTest.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/highlighter/src/test/org/apache/lucene/search/highlight/custom/HighlightCustomQueryTest.java (original)
+++ lucene/dev/trunk/lucene/highlighter/src/test/org/apache/lucene/search/highlight/custom/HighlightCustomQueryTest.java Mon Jul  8 17:55:48 2013
@@ -17,7 +17,6 @@ package org.apache.lucene.search.highlig
  * limitations under the License.
  */
 import java.io.IOException;
-import java.io.StringReader;
 import java.util.Map;
 
 import org.apache.lucene.analysis.MockAnalyzer;
@@ -89,8 +88,7 @@ public class HighlightCustomQueryTest ex
   private String highlightField(Query query, String fieldName,
       String text) throws IOException, InvalidTokenOffsetsException {
     TokenStream tokenStream = new MockAnalyzer(random(), MockTokenizer.SIMPLE,
-        true, MockTokenFilter.ENGLISH_STOPSET).tokenStream(fieldName,
-        new StringReader(text));
+        true, MockTokenFilter.ENGLISH_STOPSET).tokenStream(fieldName, text);
     // Assuming "<B>", "</B>" used to highlight
     SimpleHTMLFormatter formatter = new SimpleHTMLFormatter();
     MyQueryScorer scorer = new MyQueryScorer(query, fieldName, FIELD_NAME);

Modified: lucene/dev/trunk/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java (original)
+++ lucene/dev/trunk/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java Mon Jul  8 17:55:48 2013
@@ -19,7 +19,6 @@ package org.apache.lucene.search.vectorh
 
 import java.io.IOException;
 import java.io.Reader;
-import java.io.StringReader;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
@@ -171,7 +170,7 @@ public abstract class AbstractTestCase e
   protected List<BytesRef> analyze(String text, String field, Analyzer analyzer) throws IOException {
     List<BytesRef> bytesRefs = new ArrayList<BytesRef>();
 
-    TokenStream tokenStream = analyzer.tokenStream(field, new StringReader(text));
+    TokenStream tokenStream = analyzer.tokenStream(field, text);
     TermToBytesRefAttribute termAttribute = tokenStream.getAttribute(TermToBytesRefAttribute.class);
 
     BytesRef bytesRef = termAttribute.getBytesRef();

Modified: lucene/dev/trunk/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java (original)
+++ lucene/dev/trunk/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java Mon Jul  8 17:55:48 2013
@@ -18,7 +18,6 @@ package org.apache.lucene.index.memory;
  */
 
 import java.io.IOException;
-import java.io.StringReader;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Comparator;
@@ -291,7 +290,7 @@ public class MemoryIndex {
     
     TokenStream stream;
     try {
-      stream = analyzer.tokenStream(fieldName, new StringReader(text));
+      stream = analyzer.tokenStream(fieldName, text);
     } catch (IOException ex) {
       throw new RuntimeException(ex);
     }

Modified: lucene/dev/trunk/lucene/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java (original)
+++ lucene/dev/trunk/lucene/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java Mon Jul  8 17:55:48 2013
@@ -22,7 +22,6 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.io.Reader;
-import java.io.StringReader;
 import java.util.HashSet;
 import java.util.Set;
 
@@ -378,7 +377,7 @@ public class MemoryIndexTest extends Bas
     SpanQuery wrappedquery = new SpanMultiTermQueryWrapper<RegexpQuery>(regex);
         
     MemoryIndex mindex = new MemoryIndex(random().nextBoolean(),  random().nextInt(50) * 1024 * 1024);
-    mindex.addField("field", new MockAnalyzer(random()).tokenStream("field", new StringReader("hello there")));
+    mindex.addField("field", new MockAnalyzer(random()).tokenStream("field", "hello there"));
 
     // This throws an NPE
     assertEquals(0, mindex.search(wrappedquery), 0.00001f);
@@ -390,7 +389,7 @@ public class MemoryIndexTest extends Bas
     SpanQuery wrappedquery = new SpanOrQuery(new SpanMultiTermQueryWrapper<RegexpQuery>(regex));
 
     MemoryIndex mindex = new MemoryIndex(random().nextBoolean(),  random().nextInt(50) * 1024 * 1024);
-    mindex.addField("field", new MockAnalyzer(random()).tokenStream("field", new StringReader("hello there")));
+    mindex.addField("field", new MockAnalyzer(random()).tokenStream("field", "hello there"));
 
     // This passes though
     assertEquals(0, mindex.search(wrappedquery), 0.00001f);
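
[Note: MemoryIndex accepts either raw text plus an analyzer or a prebuilt TokenStream; the updated test lines above use the latter with the new overload. A minimal sketch, assuming the Lucene 4.x MemoryIndex API; field names, the query, and the Version constant are illustrative assumptions:]

    MemoryIndex mi = new MemoryIndex();
    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_44); // version constant is an assumption
    mi.addField("body", "hello there", analyzer);                       // text + analyzer
    mi.addField("title", analyzer.tokenStream("title", "hello world")); // prebuilt stream
    float score = mi.search(new TermQuery(new Term("body", "hello")));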

Modified: lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/analyzing/AnalyzingQueryParser.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/analyzing/AnalyzingQueryParser.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/analyzing/AnalyzingQueryParser.java (original)
+++ lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/analyzing/AnalyzingQueryParser.java Mon Jul  8 17:55:48 2013
@@ -18,7 +18,6 @@ package org.apache.lucene.queryparser.an
  */
 
 import java.io.IOException;
-import java.io.StringReader;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -165,7 +164,7 @@ public class AnalyzingQueryParser extend
     String analyzed = null;
     TokenStream stream = null;
     try{
-      stream = getAnalyzer().tokenStream(field, new StringReader(chunk));
+      stream = getAnalyzer().tokenStream(field, chunk);
       stream.reset();
       CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
       // get first and hopefully only output token

Modified: lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserBase.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserBase.java (original)
+++ lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserBase.java Mon Jul  8 17:55:48 2013
@@ -500,7 +500,7 @@ public abstract class QueryParserBase im
 
     TokenStream source;
     try {
-      source = analyzer.tokenStream(field, new StringReader(queryText));
+      source = analyzer.tokenStream(field, queryText);
       source.reset();
     } catch (IOException e) {
       ParseException p = new ParseException("Unable to initialize TokenStream to analyze query text");
@@ -844,7 +844,7 @@ public abstract class QueryParserBase im
     if (analyzerIn == null) analyzerIn = analyzer;
 
     try {
-      source = analyzerIn.tokenStream(field, new StringReader(part));
+      source = analyzerIn.tokenStream(field, part);
       source.reset();
     } catch (IOException e) {
       throw new RuntimeException("Unable to initialize TokenStream to analyze multiTerm term: " + part, e);

Modified: lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AnalyzerQueryNodeProcessor.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AnalyzerQueryNodeProcessor.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AnalyzerQueryNodeProcessor.java (original)
+++ lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AnalyzerQueryNodeProcessor.java Mon Jul  8 17:55:48 2013
@@ -18,7 +18,6 @@ package org.apache.lucene.queryparser.fl
  */
 
 import java.io.IOException;
-import java.io.StringReader;
 import java.util.ArrayList;
 import java.util.LinkedList;
 import java.util.List;
@@ -116,7 +115,7 @@ public class AnalyzerQueryNodeProcessor 
 
       TokenStream source;
       try {
-        source = this.analyzer.tokenStream(field, new StringReader(text));
+        source = this.analyzer.tokenStream(field, text);
         source.reset();
       } catch (IOException e1) {
         throw new RuntimeException(e1);

Modified: lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LikeThisQueryBuilder.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LikeThisQueryBuilder.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LikeThisQueryBuilder.java (original)
+++ lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LikeThisQueryBuilder.java Mon Jul  8 17:55:48 2013
@@ -4,7 +4,6 @@
 package org.apache.lucene.queryparser.xml.builders;
 
 import java.io.IOException;
-import java.io.StringReader;
 import java.util.HashSet;
 import java.util.Set;
 
@@ -75,7 +74,7 @@ public class LikeThisQueryBuilder implem
       stopWordsSet = new HashSet<String>();
       for (String field : fields) {
         try {
-          TokenStream ts = analyzer.tokenStream(field, new StringReader(stopWords));
+          TokenStream ts = analyzer.tokenStream(field, stopWords);
           CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
           ts.reset();
           while (ts.incrementToken()) {

Modified: lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrTermsBuilder.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrTermsBuilder.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrTermsBuilder.java (original)
+++ lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrTermsBuilder.java Mon Jul  8 17:55:48 2013
@@ -13,9 +13,9 @@ import org.apache.lucene.queryparser.xml
 import org.w3c.dom.Element;
 
 import java.io.IOException;
-import java.io.StringReader;
 import java.util.ArrayList;
 import java.util.List;
+
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -51,7 +51,7 @@ public class SpanOrTermsBuilder extends 
 
     try {
       List<SpanQuery> clausesList = new ArrayList<SpanQuery>();
-      TokenStream ts = analyzer.tokenStream(fieldName, new StringReader(value));
+      TokenStream ts = analyzer.tokenStream(fieldName, value);
       TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class);
       BytesRef bytes = termAtt.getBytesRef();
       ts.reset();

Modified: lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsFilterBuilder.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsFilterBuilder.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsFilterBuilder.java (original)
+++ lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsFilterBuilder.java Mon Jul  8 17:55:48 2013
@@ -3,7 +3,6 @@ package org.apache.lucene.queryparser.xm
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
-import org.apache.lucene.index.Term;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.queries.TermsFilter;
 import org.apache.lucene.util.BytesRef;
@@ -13,7 +12,6 @@ import org.apache.lucene.queryparser.xml
 import org.w3c.dom.Element;
 
 import java.io.IOException;
-import java.io.StringReader;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -57,9 +55,8 @@ public class TermsFilterBuilder implemen
     String fieldName = DOMUtils.getAttributeWithInheritanceOrFail(e, "fieldName");
 
     try {
-      TokenStream ts = analyzer.tokenStream(fieldName, new StringReader(text));
+      TokenStream ts = analyzer.tokenStream(fieldName, text);
       TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class);
-      Term term = null;
       BytesRef bytes = termAtt.getBytesRef();
       ts.reset();
       while (ts.incrementToken()) {

Modified: lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsQueryBuilder.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsQueryBuilder.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsQueryBuilder.java (original)
+++ lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsQueryBuilder.java Mon Jul  8 17:55:48 2013
@@ -15,7 +15,6 @@ import org.apache.lucene.queryparser.xml
 import org.w3c.dom.Element;
 
 import java.io.IOException;
-import java.io.StringReader;
 
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -53,7 +52,7 @@ public class TermsQueryBuilder implement
     BooleanQuery bq = new BooleanQuery(DOMUtils.getAttribute(e, "disableCoord", false));
     bq.setMinimumNumberShouldMatch(DOMUtils.getAttribute(e, "minimumNumberShouldMatch", 0));
     try {
-      TokenStream ts = analyzer.tokenStream(fieldName, new StringReader(text));
+      TokenStream ts = analyzer.tokenStream(fieldName, text);
       TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class);
       Term term = null;
       BytesRef bytes = termAtt.getBytesRef();

Modified: lucene/dev/trunk/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java (original)
+++ lucene/dev/trunk/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java Mon Jul  8 17:55:48 2013
@@ -18,7 +18,6 @@ package org.apache.lucene.sandbox.querie
  */
 
 import java.io.IOException;
-import java.io.StringReader;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -194,7 +193,7 @@ public class FuzzyLikeThisQuery extends 
 
   private void addTerms(IndexReader reader, FieldVals f) throws IOException {
     if (f.queryString == null) return;
-    TokenStream ts = analyzer.tokenStream(f.fieldName, new StringReader(f.queryString));
+    TokenStream ts = analyzer.tokenStream(f.fieldName, f.queryString);
     CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
 
     int corpusNumDocs = reader.numDocs();

Modified: lucene/dev/trunk/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java (original)
+++ lucene/dev/trunk/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java Mon Jul  8 17:55:48 2013
@@ -21,7 +21,6 @@ import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.io.StringReader;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
@@ -856,7 +855,7 @@ public class AnalyzingSuggester extends 
   
   final Set<IntsRef> toFiniteStrings(final BytesRef surfaceForm, final TokenStreamToAutomaton ts2a) throws IOException {
  // Analyze surface form:
-    TokenStream ts = indexAnalyzer.tokenStream("", new StringReader(surfaceForm.utf8ToString()));
+    TokenStream ts = indexAnalyzer.tokenStream("", surfaceForm.utf8ToString());
 
     // Create corresponding automaton: labels are bytes
     // from each analyzed token, with byte 0 used as
@@ -881,7 +880,7 @@ public class AnalyzingSuggester extends 
   final Automaton toLookupAutomaton(final CharSequence key) throws IOException {
     // TODO: is there a Reader from a CharSequence?
     // Turn tokenstream into automaton:
-    TokenStream ts = queryAnalyzer.tokenStream("", new StringReader(key.toString()));
+    TokenStream ts = queryAnalyzer.tokenStream("", key.toString());
     Automaton automaton = (getTokenStreamToAutomaton()).toAutomaton(ts);
     ts.close();
 

Modified: lucene/dev/trunk/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java (original)
+++ lucene/dev/trunk/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java Mon Jul  8 17:55:48 2013
@@ -295,15 +295,15 @@ public abstract class BaseTokenStreamTes
   }
   
   public static void assertAnalyzesTo(Analyzer a, String input, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[]) throws IOException {
-    assertTokenStreamContents(a.tokenStream("dummy", new StringReader(input)), output, startOffsets, endOffsets, types, posIncrements, null, input.length());
+    assertTokenStreamContents(a.tokenStream("dummy", input), output, startOffsets, endOffsets, types, posIncrements, null, input.length());
   }
   
   public static void assertAnalyzesTo(Analyzer a, String input, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[], int posLengths[]) throws IOException {
-    assertTokenStreamContents(a.tokenStream("dummy", new StringReader(input)), output, startOffsets, endOffsets, types, posIncrements, posLengths, input.length());
+    assertTokenStreamContents(a.tokenStream("dummy", input), output, startOffsets, endOffsets, types, posIncrements, posLengths, input.length());
   }
 
   public static void assertAnalyzesTo(Analyzer a, String input, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[], int posLengths[], boolean offsetsAreCorrect) throws IOException {
-    assertTokenStreamContents(a.tokenStream("dummy", new StringReader(input)), output, startOffsets, endOffsets, types, posIncrements, posLengths, input.length(), offsetsAreCorrect);
+    assertTokenStreamContents(a.tokenStream("dummy", input), output, startOffsets, endOffsets, types, posIncrements, posLengths, input.length(), offsetsAreCorrect);
   }
   
   public static void assertAnalyzesTo(Analyzer a, String input, String[] output) throws IOException {
@@ -332,7 +332,7 @@ public abstract class BaseTokenStreamTes
   
 
   public static void assertAnalyzesToReuse(Analyzer a, String input, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[]) throws IOException {
-    assertTokenStreamContents(a.tokenStream("dummy", new StringReader(input)), output, startOffsets, endOffsets, types, posIncrements, null, input.length());
+    assertTokenStreamContents(a.tokenStream("dummy", input), output, startOffsets, endOffsets, types, posIncrements, null, input.length());
   }
   
   public static void assertAnalyzesToReuse(Analyzer a, String input, String[] output) throws IOException {
@@ -891,7 +891,7 @@ public abstract class BaseTokenStreamTes
 
   protected String toDot(Analyzer a, String inputText) throws IOException {
     final StringWriter sw = new StringWriter();
-    final TokenStream ts = a.tokenStream("field", new StringReader(inputText));
+    final TokenStream ts = a.tokenStream("field", inputText);
     ts.reset();
     new TokenStreamToDot(inputText, ts, new PrintWriter(sw)).toDot();
     return sw.toString();
@@ -899,7 +899,7 @@ public abstract class BaseTokenStreamTes
 
   protected void toDotFile(Analyzer a, String inputText, String localFileName) throws IOException {
     Writer w = new OutputStreamWriter(new FileOutputStream(localFileName), "UTF-8");
-    final TokenStream ts = a.tokenStream("field", new StringReader(inputText));
+    final TokenStream ts = a.tokenStream("field", inputText);
     ts.reset();
     new TokenStreamToDot(inputText, ts, new PrintWriter(w)).toDot();
     w.close();
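
[Note: a minimal sketch of how the updated assertAnalyzesTo helpers are typically invoked; hypothetical test body inside a class extending BaseTokenStreamTestCase, with MockAnalyzer from the same test framework:]

    Analyzer a = new MockAnalyzer(random());
    assertAnalyzesTo(a, "foo bar baz", new String[] { "foo", "bar", "baz" });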

Modified: lucene/dev/trunk/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java (original)
+++ lucene/dev/trunk/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java Mon Jul  8 17:55:48 2013
@@ -19,7 +19,6 @@ package org.apache.lucene.analysis;
 
 
 import java.io.IOException;
-import java.io.StringReader;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -33,7 +32,6 @@ import org.apache.lucene.index.Directory
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.StorableField;
 import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.index.Term;
@@ -199,13 +197,13 @@ public abstract class CollationTestBase 
       doc.add(new Field("tracer", sortData[i][0], customType));
       doc.add(new TextField("contents", sortData[i][1], Field.Store.NO));
       if (sortData[i][2] != null) 
-        doc.add(new TextField("US", usAnalyzer.tokenStream("US", new StringReader(sortData[i][2]))));
+        doc.add(new TextField("US", usAnalyzer.tokenStream("US", sortData[i][2])));
       if (sortData[i][3] != null) 
-        doc.add(new TextField("France", franceAnalyzer.tokenStream("France", new StringReader(sortData[i][3]))));
+        doc.add(new TextField("France", franceAnalyzer.tokenStream("France", sortData[i][3])));
       if (sortData[i][4] != null)
-        doc.add(new TextField("Sweden", swedenAnalyzer.tokenStream("Sweden", new StringReader(sortData[i][4]))));
+        doc.add(new TextField("Sweden", swedenAnalyzer.tokenStream("Sweden", sortData[i][4])));
       if (sortData[i][5] != null) 
-        doc.add(new TextField("Denmark", denmarkAnalyzer.tokenStream("Denmark", new StringReader(sortData[i][5]))));
+        doc.add(new TextField("Denmark", denmarkAnalyzer.tokenStream("Denmark", sortData[i][5])));
       writer.addDocument(doc);
     }
     writer.forceMerge(1);
@@ -260,7 +258,7 @@ public abstract class CollationTestBase 
 
     for (int i = 0; i < numTestPoints; i++) {
       String term = _TestUtil.randomSimpleString(random());
-      TokenStream ts = analyzer.tokenStream("fake", new StringReader(term));
+      TokenStream ts = analyzer.tokenStream("fake", term);
       TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class);
       BytesRef bytes = termAtt.getBytesRef();
       ts.reset();
@@ -279,7 +277,7 @@ public abstract class CollationTestBase 
             for (Map.Entry<String,BytesRef> mapping : map.entrySet()) {
               String term = mapping.getKey();
               BytesRef expected = mapping.getValue();
-              TokenStream ts = analyzer.tokenStream("fake", new StringReader(term));
+              TokenStream ts = analyzer.tokenStream("fake", term);
               TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class);
               BytesRef bytes = termAtt.getBytesRef();
               ts.reset();

Modified: lucene/dev/trunk/solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java (original)
+++ lucene/dev/trunk/solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java Mon Jul  8 17:55:48 2013
@@ -19,7 +19,6 @@ package org.apache.solr.schema;
 
 import java.io.IOException;
 import java.io.InputStream;
-import java.io.StringReader;
 import java.util.Map;
 
 import org.apache.commons.io.IOUtils;
@@ -27,8 +26,6 @@ import org.apache.lucene.analysis.Analyz
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
 import org.apache.lucene.collation.ICUCollationKeyAnalyzer;
-import org.apache.lucene.index.GeneralField;
-import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.StorableField;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.SortField;
@@ -240,7 +237,7 @@ public class ICUCollationField extends F
     TokenStream source;
       
     try {
-      source = analyzer.tokenStream(field, new StringReader(part));
+      source = analyzer.tokenStream(field, part);
       source.reset();
     } catch (IOException e) {
       throw new RuntimeException("Unable to initialize TokenStream to analyze range part: " + part, e);

Modified: lucene/dev/trunk/solr/core/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java (original)
+++ lucene/dev/trunk/solr/core/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java Mon Jul  8 17:55:48 2013
@@ -86,7 +86,7 @@ public abstract class AnalysisRequestHan
 
       TokenStream tokenStream = null;
       try {
-        tokenStream = analyzer.tokenStream(context.getFieldName(), new StringReader(value));
+        tokenStream = analyzer.tokenStream(context.getFieldName(), value);
       } catch (IOException e) {
         throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
       }
@@ -140,7 +140,7 @@ public abstract class AnalysisRequestHan
   protected Set<BytesRef> getQueryTokenSet(String query, Analyzer analyzer) {
     try {
       final Set<BytesRef> tokens = new HashSet<BytesRef>();
-      final TokenStream tokenStream = analyzer.tokenStream("", new StringReader(query));
+      final TokenStream tokenStream = analyzer.tokenStream("", query);
       final TermToBytesRefAttribute bytesAtt = tokenStream.getAttribute(TermToBytesRefAttribute.class);
       final BytesRef bytes = bytesAtt.getBytesRef();
 

Modified: lucene/dev/trunk/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java (original)
+++ lucene/dev/trunk/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java Mon Jul  8 17:55:48 2013
@@ -72,7 +72,6 @@ import javax.xml.xpath.XPathFactory;
 import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
-import java.io.StringReader;
 import java.net.MalformedURLException;
 import java.net.URL;
 import java.util.ArrayList;
@@ -344,7 +343,7 @@ public class QueryElevationComponent ext
       return query;
     }
     StringBuilder norm = new StringBuilder();
-    TokenStream tokens = analyzer.tokenStream("", new StringReader(query));
+    TokenStream tokens = analyzer.tokenStream("", query);
     tokens.reset();
 
     CharTermAttribute termAtt = tokens.addAttribute(CharTermAttribute.class);

Modified: lucene/dev/trunk/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java (original)
+++ lucene/dev/trunk/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java Mon Jul  8 17:55:48 2013
@@ -18,7 +18,6 @@
 package org.apache.solr.handler.component;
 
 import java.io.IOException;
-import java.io.StringReader;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -464,7 +463,7 @@ public class SpellCheckComponent extends
   private Collection<Token> getTokens(String q, Analyzer analyzer) throws IOException {
     Collection<Token> result = new ArrayList<Token>();
     assert analyzer != null;
-    TokenStream ts = analyzer.tokenStream("", new StringReader(q));
+    TokenStream ts = analyzer.tokenStream("", q);
     ts.reset();
     // TODO: support custom attributes
     CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);

Modified: lucene/dev/trunk/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java (original)
+++ lucene/dev/trunk/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java Mon Jul  8 17:55:48 2013
@@ -48,7 +48,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
-import java.io.StringReader;
 import java.util.*;
 
 /**
@@ -636,7 +635,7 @@ public class DefaultSolrHighlighter exte
   private TokenStream createAnalyzerTStream(IndexSchema schema, String fieldName, String docText) throws IOException {
 
     TokenStream tstream;
-    TokenStream ts = schema.getAnalyzer().tokenStream(fieldName, new StringReader(docText));
+    TokenStream ts = schema.getAnalyzer().tokenStream(fieldName, docText);
     ts.reset();
     tstream = new TokenOrderingFilter(ts, 10);
     return tstream;

Modified: lucene/dev/trunk/solr/core/src/java/org/apache/solr/parser/SolrQueryParserBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/java/org/apache/solr/parser/SolrQueryParserBase.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/java/org/apache/solr/parser/SolrQueryParserBase.java (original)
+++ lucene/dev/trunk/solr/core/src/java/org/apache/solr/parser/SolrQueryParserBase.java Mon Jul  8 17:55:48 2013
@@ -405,7 +405,7 @@ public abstract class SolrQueryParserBas
 
     TokenStream source;
     try {
-      source = analyzer.tokenStream(field, new StringReader(queryText));
+      source = analyzer.tokenStream(field, queryText);
       source.reset();
     } catch (IOException e) {
       throw new SyntaxError("Unable to initialize TokenStream to analyze query text", e);

Modified: lucene/dev/trunk/solr/core/src/java/org/apache/solr/schema/CollationField.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/java/org/apache/solr/schema/CollationField.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/java/org/apache/solr/schema/CollationField.java (original)
+++ lucene/dev/trunk/solr/core/src/java/org/apache/solr/schema/CollationField.java Mon Jul  8 17:55:48 2013
@@ -19,7 +19,6 @@ package org.apache.solr.schema;
 
 import java.io.IOException;
 import java.io.InputStream;
-import java.io.StringReader;
 import java.text.Collator;
 import java.text.ParseException;
 import java.text.RuleBasedCollator;
@@ -31,8 +30,6 @@ import org.apache.lucene.analysis.Analyz
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
 import org.apache.lucene.collation.CollationKeyAnalyzer;
-import org.apache.lucene.index.GeneralField;
-import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.StorableField;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.SortField;
@@ -216,7 +213,7 @@ public class CollationField extends Fiel
     TokenStream source;
       
     try {
-      source = analyzer.tokenStream(field, new StringReader(part));
+      source = analyzer.tokenStream(field, part);
       source.reset();
     } catch (IOException e) {
       throw new RuntimeException("Unable to initialize TokenStream to analyze range part: " + part, e);

Modified: lucene/dev/trunk/solr/core/src/java/org/apache/solr/schema/TextField.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/java/org/apache/solr/schema/TextField.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/java/org/apache/solr/schema/TextField.java (original)
+++ lucene/dev/trunk/solr/core/src/java/org/apache/solr/schema/TextField.java Mon Jul  8 17:55:48 2013
@@ -35,7 +35,6 @@ import java.util.Map;
 import java.util.List;
 import java.util.ArrayList;
 import java.io.IOException;
-import java.io.StringReader;
 
 /** <code>TextField</code> is the basic type for configurable text analysis.
  * Analyzers for field types using this implementation should be defined in the schema.
@@ -141,7 +140,7 @@ public class TextField extends FieldType
 
     TokenStream source;
     try {
-      source = analyzerIn.tokenStream(field, new StringReader(part));
+      source = analyzerIn.tokenStream(field, part);
       source.reset();
     } catch (IOException e) {
       throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unable to initialize TokenStream to analyze multiTerm term: " + part, e);
@@ -181,7 +180,7 @@ public class TextField extends FieldType
 
     TokenStream source;
     try {
-      source = analyzer.tokenStream(field, new StringReader(queryText));
+      source = analyzer.tokenStream(field, queryText);
       source.reset();
     } catch (IOException e) {
       throw new RuntimeException("Unable to initialize TokenStream to analyze query text", e);

Modified: lucene/dev/trunk/solr/core/src/java/org/apache/solr/spelling/SpellingQueryConverter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/java/org/apache/solr/spelling/SpellingQueryConverter.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/java/org/apache/solr/spelling/SpellingQueryConverter.java (original)
+++ lucene/dev/trunk/solr/core/src/java/org/apache/solr/spelling/SpellingQueryConverter.java Mon Jul  8 17:55:48 2013
@@ -18,8 +18,6 @@
 package org.apache.solr.spelling;
 
 import java.io.IOException;
-import java.io.Reader;
-import java.io.StringReader;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -28,11 +26,10 @@ import java.util.regex.Pattern;
 
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
 
 
@@ -160,7 +157,7 @@ public class SpellingQueryConverter exte
         flagValue = TERM_PRECEDES_NEW_BOOLEAN_OPERATOR_FLAG;
       }
       try {
-        analyze(result, new StringReader(word), startIndex, flagValue);
+        analyze(result, word, startIndex, flagValue);
       } catch (IOException e) {
         // TODO: shouldn't we log something?
       }   
@@ -174,7 +171,7 @@ public class SpellingQueryConverter exte
     return result;
   }
   
-  protected void analyze(Collection<Token> result, Reader text, int offset, int flagsAttValue) throws IOException {
+  protected void analyze(Collection<Token> result, String text, int offset, int flagsAttValue) throws IOException {
     TokenStream stream = analyzer.tokenStream("", text);
     // TODO: support custom attributes
     CharTermAttribute termAtt = stream.addAttribute(CharTermAttribute.class);
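
[Note: the protected analyze(...) signature changes here from Reader to String, which is source-incompatible for external QueryConverter subclasses that call or override it; Solr's own caller (SuggestQueryConverter, below) is updated in the same commit. A hedged sketch of an affected override; the subclass itself is hypothetical:]

    // Hypothetical subclass of SpellingQueryConverter: the Reader parameter becomes String.
    @Override
    protected void analyze(Collection<Token> result, String text, int offset,
                           int flagsAttValue) throws IOException {
      super.analyze(result, text, offset, flagsAttValue);
    }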

Modified: lucene/dev/trunk/solr/core/src/java/org/apache/solr/spelling/SuggestQueryConverter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/java/org/apache/solr/spelling/SuggestQueryConverter.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/java/org/apache/solr/spelling/SuggestQueryConverter.java (original)
+++ lucene/dev/trunk/solr/core/src/java/org/apache/solr/spelling/SuggestQueryConverter.java Mon Jul  8 17:55:48 2013
@@ -18,7 +18,6 @@ package org.apache.solr.spelling;
  */
 
 import java.io.IOException;
-import java.io.StringReader;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -38,7 +37,7 @@ public class SuggestQueryConverter exten
 
     Collection<Token> result = new ArrayList<Token>();
     try {
-      analyze(result, new StringReader(original), 0, 0);
+      analyze(result, original, 0, 0);
     } catch (IOException e) {
       throw new RuntimeException(e);
     }

Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/TestTrie.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/TestTrie.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/TestTrie.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/TestTrie.java Mon Jul  8 17:55:48 2013
@@ -27,7 +27,6 @@ import org.junit.After;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import java.io.StringReader;
 import java.text.SimpleDateFormat;
 import java.util.Locale;
 import java.util.TimeZone;
@@ -57,7 +56,7 @@ public class TestTrie extends SolrTestCa
     assertTrue(type instanceof TrieField);
     
     String value = String.valueOf(random().nextInt());
-    TokenStream ts = type.getAnalyzer().tokenStream("dummy", new StringReader(value));
+    TokenStream ts = type.getAnalyzer().tokenStream("dummy", value);
     OffsetAttribute ofsAtt = ts.addAttribute(OffsetAttribute.class);
     ts.reset();
     int count = 0;
@@ -74,7 +73,7 @@ public class TestTrie extends SolrTestCa
     ts.close();
     
     // Test empty one:
-    ts = type.getAnalyzer().tokenStream("dummy", new StringReader(""));
+    ts = type.getAnalyzer().tokenStream("dummy", "");
     ts.reset();
     assertFalse(ts.incrementToken());
     ts.end();

Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/analysis/TestReversedWildcardFilterFactory.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/analysis/TestReversedWildcardFilterFactory.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/analysis/TestReversedWildcardFilterFactory.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/analysis/TestReversedWildcardFilterFactory.java Mon Jul  8 17:55:48 2013
@@ -86,7 +86,7 @@ public class TestReversedWildcardFilterF
     String text = "one two three si\uD834\uDD1Ex";
 
     // field one
-    TokenStream input = a.tokenStream("one", new StringReader(text));
+    TokenStream input = a.tokenStream("one", text);
     assertTokenStreamContents(input,
         new String[] { "\u0001eno", "one", "\u0001owt", "two", 
           "\u0001eerht", "three", "\u0001x\uD834\uDD1Eis", "si\uD834\uDD1Ex" },
@@ -95,7 +95,7 @@ public class TestReversedWildcardFilterF
         new int[] { 1, 0, 1, 0, 1, 0, 1, 0 }
     );
     // field two
-    input = a.tokenStream("two", new StringReader(text));
+    input = a.tokenStream("two", text);
     assertTokenStreamContents(input,
         new String[] { "\u0001eno", "\u0001owt", 
           "\u0001eerht", "\u0001x\uD834\uDD1Eis" },
@@ -104,7 +104,7 @@ public class TestReversedWildcardFilterF
         new int[] { 1, 1, 1, 1 }
     );
     // field three
-    input = a.tokenStream("three", new StringReader(text));
+    input = a.tokenStream("three", text);
     assertTokenStreamContents(input,
         new String[] { "one", "two", "three", "si\uD834\uDD1Ex" },
         new int[] { 0, 4, 8, 14 },

Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/highlight/HighlighterTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/highlight/HighlighterTest.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/highlight/HighlighterTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/highlight/HighlighterTest.java Mon Jul  8 17:55:48 2013
@@ -30,7 +30,6 @@ import org.junit.After;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import java.io.StringReader;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
@@ -175,7 +174,7 @@ public class HighlighterTest extends Sol
   public void testTermOffsetsTokenStream() throws Exception {
     String[] multivalued = { "a b c d", "e f g", "h", "i j k l m n" };
     Analyzer a1 = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
-    TokenStream tokenStream = a1.tokenStream("", new StringReader("a b c d e f g h i j k l m n"));
+    TokenStream tokenStream = a1.tokenStream("", "a b c d e f g h i j k l m n");
     tokenStream.reset();
 
     TermOffsetsTokenStream tots = new TermOffsetsTokenStream(
@@ -183,7 +182,7 @@ public class HighlighterTest extends Sol
     for( String v : multivalued ){
       TokenStream ts1 = tots.getMultiValuedTokenStream( v.length() );
       Analyzer a2 = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
-      TokenStream ts2 = a2.tokenStream("", new StringReader(v));
+      TokenStream ts2 = a2.tokenStream("", v);
       ts2.reset();
 
       while (ts1.incrementToken()) {

Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/spelling/SimpleQueryConverter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/spelling/SimpleQueryConverter.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/spelling/SimpleQueryConverter.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/spelling/SimpleQueryConverter.java Mon Jul  8 17:55:48 2013
@@ -29,7 +29,6 @@ import org.apache.lucene.util.Version;
 
 import java.util.Collection;
 import java.util.HashSet;
-import java.io.StringReader;
 import java.io.IOException;
 
 
@@ -44,7 +43,7 @@ class SimpleQueryConverter extends Spell
     try {
       Collection<Token> result = new HashSet<Token>();
       WhitespaceAnalyzer analyzer = new WhitespaceAnalyzer(Version.LUCENE_40);
-      TokenStream ts = analyzer.tokenStream("", new StringReader(origQuery));
+      TokenStream ts = analyzer.tokenStream("", origQuery);
       // TODO: support custom attributes
       CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
       OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);