Posted to commits@lucene.apache.org by us...@apache.org on 2013/07/08 19:55:49 UTC

svn commit: r1500862 [1/2] - in /lucene/dev/trunk: lucene/ lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/ lucene/analysis/common/src/test/org/apache/lucene/analysis/core/ lucene/analysis/common/src/test/org/apache/lucene/analysis/m...

Author: uschindler
Date: Mon Jul  8 17:55:48 2013
New Revision: 1500862

URL: http://svn.apache.org/r1500862
Log:
LUCENE-5097: Analyzer now has an additional tokenStream(String fieldName, String text) method, so wrapping text in a StringReader is no longer needed for the common case. This method uses an internal reusable reader, which was previously only used by the Field class.
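
For illustration, a minimal before/after sketch of the new overload. This is a hedged example, not part of the commit: it assumes Lucene 4.x with analyzers-common on the classpath; WhitespaceAnalyzer, the Version constant, and the field/text values are illustrative. The consume() helper follows the standard reset/incrementToken/end/close TokenStream workflow.

    import java.io.IOException;
    import java.io.StringReader;

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.util.Version;

    public class TokenStreamStringDemo {
      public static void main(String[] args) throws IOException {
        Analyzer analyzer = new WhitespaceAnalyzer(Version.LUCENE_44);

        // Before this commit: wrap the String in a new StringReader per call.
        consume(analyzer.tokenStream("body", new StringReader("plain text demo")));

        // After this commit: pass the String directly; internally the analyzer
        // reuses a ReusableStringReader instead of allocating a new one.
        consume(analyzer.tokenStream("body", "plain text demo"));
      }

      // Standard TokenStream workflow: reset, incrementToken loop, end, close.
      private static void consume(TokenStream ts) throws IOException {
        CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
          System.out.println(termAtt);
        }
        ts.end();
        ts.close();
      }
    }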

Added:
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/analysis/ReusableStringReader.java   (with props)
    lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/analysis/TestReusableStringReader.java   (with props)
Modified:
    lucene/dev/trunk/lucene/CHANGES.txt
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java
    lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopAnalyzer.java
    lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java
    lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenPositionFilter.java
    lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPerFieldAnalyzerWrapper.java
    lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java
    lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java
    lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java
    lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java
    lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java
    lucene/dev/trunk/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java
    lucene/dev/trunk/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestExtendedMode.java
    lucene/dev/trunk/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseAnalyzer.java
    lucene/dev/trunk/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseTokenizer.java
    lucene/dev/trunk/lucene/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikAnalyzer.java
    lucene/dev/trunk/lucene/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java
    lucene/dev/trunk/lucene/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMABaseAnalyzerTest.java
    lucene/dev/trunk/lucene/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMATypeAwareAnalyzerTest.java
    lucene/dev/trunk/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
    lucene/dev/trunk/lucene/classification/src/java/org/apache/lucene/classification/SimpleNaiveBayesClassifier.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/analysis/Analyzer.java
    lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/document/Field.java
    lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java
    lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/document/TestField.java
    lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java
    lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java
    lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java
    lucene/dev/trunk/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java
    lucene/dev/trunk/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java
    lucene/dev/trunk/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
    lucene/dev/trunk/lucene/highlighter/src/test/org/apache/lucene/search/highlight/custom/HighlightCustomQueryTest.java
    lucene/dev/trunk/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java
    lucene/dev/trunk/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
    lucene/dev/trunk/lucene/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/analyzing/AnalyzingQueryParser.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserBase.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AnalyzerQueryNodeProcessor.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LikeThisQueryBuilder.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrTermsBuilder.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsFilterBuilder.java
    lucene/dev/trunk/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsQueryBuilder.java
    lucene/dev/trunk/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java
    lucene/dev/trunk/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java
    lucene/dev/trunk/lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java
    lucene/dev/trunk/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java
    lucene/dev/trunk/solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java
    lucene/dev/trunk/solr/core/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java
    lucene/dev/trunk/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
    lucene/dev/trunk/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java
    lucene/dev/trunk/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java
    lucene/dev/trunk/solr/core/src/java/org/apache/solr/parser/SolrQueryParserBase.java
    lucene/dev/trunk/solr/core/src/java/org/apache/solr/schema/CollationField.java
    lucene/dev/trunk/solr/core/src/java/org/apache/solr/schema/TextField.java
    lucene/dev/trunk/solr/core/src/java/org/apache/solr/spelling/SpellingQueryConverter.java
    lucene/dev/trunk/solr/core/src/java/org/apache/solr/spelling/SuggestQueryConverter.java
    lucene/dev/trunk/solr/core/src/test/org/apache/solr/TestTrie.java
    lucene/dev/trunk/solr/core/src/test/org/apache/solr/analysis/TestReversedWildcardFilterFactory.java
    lucene/dev/trunk/solr/core/src/test/org/apache/solr/highlight/HighlighterTest.java
    lucene/dev/trunk/solr/core/src/test/org/apache/solr/spelling/SimpleQueryConverter.java

Modified: lucene/dev/trunk/lucene/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/CHANGES.txt?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/CHANGES.txt (original)
+++ lucene/dev/trunk/lucene/CHANGES.txt Mon Jul  8 17:55:48 2013
@@ -300,6 +300,11 @@ API Changes
   an overhead parameter, so you can easily pass a different value other than
   PackedInts.FASTEST from your own codec.  (Robert Muir)
   
+* LUCENE-5097: Analyzer now has an additional tokenStream(String fieldName,
+  String text) method, so wrapping text in a StringReader is no longer
+  needed for the common case. This method uses an internal reusable reader,
+  which was previously only used by the Field class.  (Uwe Schindler, Robert Muir)
+  
 Build
 
 * LUCENE-4987: Upgrade randomized testing to version 2.0.10: 

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java Mon Jul  8 17:55:48 2013
@@ -18,7 +18,6 @@ package org.apache.lucene.analysis.synon
  */
 
 import java.io.IOException;
-import java.io.StringReader;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -112,7 +111,7 @@ public class SynonymMap {
      *  separates by {@link SynonymMap#WORD_SEPARATOR}.
      *  reuse and its chars must not be null. */
     public static CharsRef analyze(Analyzer analyzer, String text, CharsRef reuse) throws IOException {
-      TokenStream ts = analyzer.tokenStream("", new StringReader(text));
+      TokenStream ts = analyzer.tokenStream("", text);
       CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
       PositionIncrementAttribute posIncAtt = ts.addAttribute(PositionIncrementAttribute.class);
       ts.reset();

Modified: lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopAnalyzer.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopAnalyzer.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopAnalyzer.java Mon Jul  8 17:55:48 2013
@@ -24,7 +24,6 @@ import org.apache.lucene.analysis.tokena
 import org.apache.lucene.analysis.util.CharArraySet;
 import org.apache.lucene.util.Version;
 
-import java.io.StringReader;
 import java.io.IOException;
 import java.util.Iterator;
 import java.util.Set;
@@ -47,8 +46,7 @@ public class TestStopAnalyzer extends Ba
 
   public void testDefaults() throws IOException {
     assertTrue(stop != null);
-    StringReader reader = new StringReader("This is a test of the english stop analyzer");
-    TokenStream stream = stop.tokenStream("test", reader);
+    TokenStream stream = stop.tokenStream("test", "This is a test of the english stop analyzer");
     assertTrue(stream != null);
     CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
     stream.reset();
@@ -61,8 +59,7 @@ public class TestStopAnalyzer extends Ba
   public void testStopList() throws IOException {
     CharArraySet stopWordsSet = new CharArraySet(TEST_VERSION_CURRENT, asSet("good", "test", "analyzer"), false);
     StopAnalyzer newStop = new StopAnalyzer(Version.LUCENE_40, stopWordsSet);
-    StringReader reader = new StringReader("This is a good test of the english stop analyzer");
-    TokenStream stream = newStop.tokenStream("test", reader);
+    TokenStream stream = newStop.tokenStream("test", "This is a good test of the english stop analyzer");
     assertNotNull(stream);
     CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
     
@@ -76,9 +73,9 @@ public class TestStopAnalyzer extends Ba
   public void testStopListPositions() throws IOException {
     CharArraySet stopWordsSet = new CharArraySet(TEST_VERSION_CURRENT, asSet("good", "test", "analyzer"), false);
     StopAnalyzer newStop = new StopAnalyzer(TEST_VERSION_CURRENT, stopWordsSet);
-    StringReader reader = new StringReader("This is a good test of the english stop analyzer with positions");
-    int expectedIncr[] =                  { 1,   1, 1,          3, 1,  1,      1,            2,   1};
-    TokenStream stream = newStop.tokenStream("test", reader);
+    String s =             "This is a good test of the english stop analyzer with positions";
+    int expectedIncr[] =  { 1,   1, 1,          3, 1,  1,      1,            2,   1};
+    TokenStream stream = newStop.tokenStream("test", s);
     assertNotNull(stream);
     int i = 0;
     CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);

Modified: lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java Mon Jul  8 17:55:48 2013
@@ -18,13 +18,10 @@ package org.apache.lucene.analysis.misce
  */
 
 import java.io.IOException;
-import java.io.StringReader;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
-import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.DirectoryReader;
@@ -47,14 +44,14 @@ public class TestLimitTokenCountAnalyzer
       Analyzer a = new LimitTokenCountAnalyzer(mock, 2, consumeAll);
     
       // dont use assertAnalyzesTo here, as the end offset is not the end of the string (unless consumeAll is true, in which case its correct)!
-      assertTokenStreamContents(a.tokenStream("dummy", new StringReader("1  2     3  4  5")), new String[] { "1", "2" }, new int[] { 0, 3 }, new int[] { 1, 4 }, consumeAll ? 16 : null);
-      assertTokenStreamContents(a.tokenStream("dummy", new StringReader("1 2 3 4 5")), new String[] { "1", "2" }, new int[] { 0, 2 }, new int[] { 1, 3 }, consumeAll ? 9 : null);
+      assertTokenStreamContents(a.tokenStream("dummy", "1  2     3  4  5"), new String[] { "1", "2" }, new int[] { 0, 3 }, new int[] { 1, 4 }, consumeAll ? 16 : null);
+      assertTokenStreamContents(a.tokenStream("dummy", "1 2 3 4 5"), new String[] { "1", "2" }, new int[] { 0, 2 }, new int[] { 1, 3 }, consumeAll ? 9 : null);
       
       // less than the limit, ensure we behave correctly
-      assertTokenStreamContents(a.tokenStream("dummy", new StringReader("1  ")), new String[] { "1" }, new int[] { 0 }, new int[] { 1 }, consumeAll ? 3 : null);
+      assertTokenStreamContents(a.tokenStream("dummy", "1  "), new String[] { "1" }, new int[] { 0 }, new int[] { 1 }, consumeAll ? 3 : null);
     
       // equal to limit
-      assertTokenStreamContents(a.tokenStream("dummy", new StringReader("1  2  ")), new String[] { "1", "2" }, new int[] { 0, 3 }, new int[] { 1, 4 }, consumeAll ? 6 : null);
+      assertTokenStreamContents(a.tokenStream("dummy", "1  2  "), new String[] { "1", "2" }, new int[] { 0, 3 }, new int[] { 1, 4 }, consumeAll ? 6 : null);
     }
   }
 

Modified: lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenPositionFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenPositionFilter.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenPositionFilter.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenPositionFilter.java Mon Jul  8 17:55:48 2013
@@ -43,17 +43,17 @@ public class TestLimitTokenPositionFilte
       };
 
       // dont use assertAnalyzesTo here, as the end offset is not the end of the string (unless consumeAll is true, in which case its correct)!
-      assertTokenStreamContents(a.tokenStream("dummy", new StringReader("1  2     3  4  5")), 
+      assertTokenStreamContents(a.tokenStream("dummy", "1  2     3  4  5"), 
                                 new String[] { "1", "2" }, new int[] { 0, 3 }, new int[] { 1, 4 }, consumeAll ? 16 : null);
       assertTokenStreamContents(a.tokenStream("dummy", new StringReader("1 2 3 4 5")), 
                                 new String[] { "1", "2" }, new int[] { 0, 2 }, new int[] { 1, 3 }, consumeAll ? 9 : null);
 
       // less than the limit, ensure we behave correctly
-      assertTokenStreamContents(a.tokenStream("dummy", new StringReader("1  ")),
+      assertTokenStreamContents(a.tokenStream("dummy", "1  "),
                                 new String[] { "1" }, new int[] { 0 }, new int[] { 1 }, consumeAll ? 3 : null);
                                                                                    
       // equal to limit
-      assertTokenStreamContents(a.tokenStream("dummy", new StringReader("1  2  ")), 
+      assertTokenStreamContents(a.tokenStream("dummy", "1  2  "), 
                                 new String[] { "1", "2" }, new int[] { 0, 3 }, new int[] { 1, 4 }, consumeAll ? 6 : null);
     }
   }

Modified: lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPerFieldAnalyzerWrapper.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPerFieldAnalyzerWrapper.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPerFieldAnalyzerWrapper.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPerFieldAnalyzerWrapper.java Mon Jul  8 17:55:48 2013
@@ -1,7 +1,6 @@
 package org.apache.lucene.analysis.miscellaneous;
 
 import java.io.Reader;
-import java.io.StringReader;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
@@ -38,8 +37,7 @@ public class TestPerFieldAnalyzerWrapper
     PerFieldAnalyzerWrapper analyzer =
               new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT), analyzerPerField);
 
-    TokenStream tokenStream = analyzer.tokenStream("field",
-        new StringReader(text));
+    TokenStream tokenStream = analyzer.tokenStream("field", text);
     CharTermAttribute termAtt = tokenStream.getAttribute(CharTermAttribute.class);
     tokenStream.reset();
 
@@ -48,8 +46,7 @@ public class TestPerFieldAnalyzerWrapper
                  "Qwerty",
                  termAtt.toString());
 
-    tokenStream = analyzer.tokenStream("special",
-        new StringReader(text));
+    tokenStream = analyzer.tokenStream("special", text);
     termAtt = tokenStream.getAttribute(CharTermAttribute.class);
     tokenStream.reset();
 

Modified: lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java Mon Jul  8 17:55:48 2013
@@ -26,7 +26,6 @@ import org.apache.lucene.index.IndexWrit
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.store.RAMDirectory;
 
-import java.io.StringReader;
 import java.util.Arrays;
 import java.util.Collections;
 
@@ -66,44 +65,44 @@ public class QueryAutoStopWordAnalyzerTe
   public void testNoStopwords() throws Exception {
     // Note: an empty list of fields passed in
     protectedAnalyzer = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, appAnalyzer, reader, Collections.<String>emptyList(), 1);
-    TokenStream protectedTokenStream = protectedAnalyzer.tokenStream("variedField", new StringReader("quick"));
+    TokenStream protectedTokenStream = protectedAnalyzer.tokenStream("variedField", "quick");
     assertTokenStreamContents(protectedTokenStream, new String[]{"quick"});
 
-    protectedTokenStream = protectedAnalyzer.tokenStream("repetitiveField", new StringReader("boring"));
+    protectedTokenStream = protectedAnalyzer.tokenStream("repetitiveField", "boring");
     assertTokenStreamContents(protectedTokenStream, new String[]{"boring"});
   }
 
   public void testDefaultStopwordsAllFields() throws Exception {
     protectedAnalyzer = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, appAnalyzer, reader);
-    TokenStream protectedTokenStream = protectedAnalyzer.tokenStream("repetitiveField", new StringReader("boring"));
+    TokenStream protectedTokenStream = protectedAnalyzer.tokenStream("repetitiveField", "boring");
     assertTokenStreamContents(protectedTokenStream, new String[0]); // Default stop word filtering will remove boring
   }
 
   public void testStopwordsAllFieldsMaxPercentDocs() throws Exception {
     protectedAnalyzer = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, appAnalyzer, reader, 1f / 2f);
 
-    TokenStream protectedTokenStream = protectedAnalyzer.tokenStream("repetitiveField", new StringReader("boring"));
+    TokenStream protectedTokenStream = protectedAnalyzer.tokenStream("repetitiveField", "boring");
     // A filter on terms in > one half of docs remove boring
     assertTokenStreamContents(protectedTokenStream, new String[0]);
 
-    protectedTokenStream = protectedAnalyzer.tokenStream("repetitiveField", new StringReader("vaguelyboring"));
+    protectedTokenStream = protectedAnalyzer.tokenStream("repetitiveField", "vaguelyboring");
      // A filter on terms in > half of docs should not remove vaguelyBoring
     assertTokenStreamContents(protectedTokenStream, new String[]{"vaguelyboring"});
 
     protectedAnalyzer = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, appAnalyzer, reader, 1f / 4f);
-    protectedTokenStream = protectedAnalyzer.tokenStream("repetitiveField", new StringReader("vaguelyboring"));
+    protectedTokenStream = protectedAnalyzer.tokenStream("repetitiveField", "vaguelyboring");
      // A filter on terms in > quarter of docs should remove vaguelyBoring
     assertTokenStreamContents(protectedTokenStream, new String[0]);
   }
 
   public void testStopwordsPerFieldMaxPercentDocs() throws Exception {
     protectedAnalyzer = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, appAnalyzer, reader, Arrays.asList("variedField"), 1f / 2f);
-    TokenStream protectedTokenStream = protectedAnalyzer.tokenStream("repetitiveField", new StringReader("boring"));
+    TokenStream protectedTokenStream = protectedAnalyzer.tokenStream("repetitiveField", "boring");
     // A filter on one Field should not affect queries on another
     assertTokenStreamContents(protectedTokenStream, new String[]{"boring"});
 
     protectedAnalyzer = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, appAnalyzer, reader, Arrays.asList("variedField", "repetitiveField"), 1f / 2f);
-    protectedTokenStream = protectedAnalyzer.tokenStream("repetitiveField", new StringReader("boring"));
+    protectedTokenStream = protectedAnalyzer.tokenStream("repetitiveField", "boring");
     // A filter on the right Field should affect queries on it
     assertTokenStreamContents(protectedTokenStream, new String[0]);
   }
@@ -121,11 +120,11 @@ public class QueryAutoStopWordAnalyzerTe
   public void testNoFieldNamePollution() throws Exception {
     protectedAnalyzer = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, appAnalyzer, reader, Arrays.asList("repetitiveField"), 10);
 
-    TokenStream protectedTokenStream = protectedAnalyzer.tokenStream("repetitiveField", new StringReader("boring"));
+    TokenStream protectedTokenStream = protectedAnalyzer.tokenStream("repetitiveField", "boring");
     // Check filter set up OK
     assertTokenStreamContents(protectedTokenStream, new String[0]);
 
-    protectedTokenStream = protectedAnalyzer.tokenStream("variedField", new StringReader("boring"));
+    protectedTokenStream = protectedAnalyzer.tokenStream("variedField", "boring");
     // Filter should not prevent stopwords in one field being used in another
     assertTokenStreamContents(protectedTokenStream, new String[]{"boring"});
   }
@@ -134,7 +133,7 @@ public class QueryAutoStopWordAnalyzerTe
     QueryAutoStopWordAnalyzer a = new QueryAutoStopWordAnalyzer(
         TEST_VERSION_CURRENT,
         new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false), reader, 10);
-    TokenStream ts = a.tokenStream("repetitiveField", new StringReader("this boring"));
+    TokenStream ts = a.tokenStream("repetitiveField", "this boring");
     assertTokenStreamContents(ts, new String[] { "this" });
   }
 }

Modified: lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java Mon Jul  8 17:55:48 2013
@@ -17,8 +17,6 @@ package org.apache.lucene.analysis.shing
  * limitations under the License.
  */
 
-import java.io.StringReader;
-
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.MockAnalyzer;
@@ -97,7 +95,7 @@ public class ShingleAnalyzerWrapperTest 
   public void testShingleAnalyzerWrapperPhraseQuery() throws Exception {
     PhraseQuery q = new PhraseQuery();
 
-    TokenStream ts = analyzer.tokenStream("content", new StringReader("this sentence"));
+    TokenStream ts = analyzer.tokenStream("content", "this sentence");
     int j = -1;
     
     PositionIncrementAttribute posIncrAtt = ts.addAttribute(PositionIncrementAttribute.class);
@@ -123,7 +121,7 @@ public class ShingleAnalyzerWrapperTest 
   public void testShingleAnalyzerWrapperBooleanQuery() throws Exception {
     BooleanQuery q = new BooleanQuery();
 
-    TokenStream ts = analyzer.tokenStream("content", new StringReader("test sentence"));
+    TokenStream ts = analyzer.tokenStream("content", "test sentence");
     
     CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
     

Modified: lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java Mon Jul  8 17:55:48 2013
@@ -91,7 +91,7 @@ public class TestTeeSinkTokenFilter exte
     Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
     Document doc = new Document();
-    TokenStream tokenStream = analyzer.tokenStream("field", new StringReader("abcd   "));
+    TokenStream tokenStream = analyzer.tokenStream("field", "abcd   ");
     TeeSinkTokenFilter tee = new TeeSinkTokenFilter(tokenStream);
     TokenStream sink = tee.newSinkTokenStream();
     FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);

Modified: lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java Mon Jul  8 17:55:48 2013
@@ -19,7 +19,6 @@ package org.apache.lucene.analysis.th;
 
 import java.io.IOException;
 import java.io.Reader;
-import java.io.StringReader;
 import java.util.Random;
 
 import org.apache.lucene.analysis.Analyzer;
@@ -30,7 +29,6 @@ import org.apache.lucene.analysis.core.K
 import org.apache.lucene.analysis.core.StopAnalyzer;
 import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
 import org.apache.lucene.analysis.util.CharArraySet;
-import org.apache.lucene.util.Version;
 
 /**
  * Test case for ThaiAnalyzer, modified from TestFrenchAnalyzer
@@ -122,10 +120,10 @@ public class TestThaiAnalyzer extends Ba
   public void testAttributeReuse() throws Exception {
     ThaiAnalyzer analyzer = new ThaiAnalyzer(TEST_VERSION_CURRENT);
     // just consume
-    TokenStream ts = analyzer.tokenStream("dummy", new StringReader("ภาษาไทย"));
+    TokenStream ts = analyzer.tokenStream("dummy", "ภาษาไทย");
     assertTokenStreamContents(ts, new String[] { "ภาษา", "ไทย" });
     // this consumer adds flagsAtt, which this analyzer does not use. 
-    ts = analyzer.tokenStream("dummy", new StringReader("ภาษาไทย"));
+    ts = analyzer.tokenStream("dummy", "ภาษาไทย");
     ts.addAttribute(FlagsAttribute.class);
     assertTokenStreamContents(ts, new String[] { "ภาษา", "ไทย" });
   }

Modified: lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java Mon Jul  8 17:55:48 2013
@@ -123,7 +123,7 @@ public class TestCharTokenizers extends 
     int num = 1000 * RANDOM_MULTIPLIER;
     for (int i = 0; i < num; i++) {
       String s = _TestUtil.randomUnicodeString(random());
-      TokenStream ts = analyzer.tokenStream("foo", new StringReader(s));
+      TokenStream ts = analyzer.tokenStream("foo", s);
       ts.reset();
       OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
       while (ts.incrementToken()) {
@@ -161,7 +161,7 @@ public class TestCharTokenizers extends 
     int num = 1000 * RANDOM_MULTIPLIER;
     for (int i = 0; i < num; i++) {
       String s = _TestUtil.randomUnicodeString(random());
-      TokenStream ts = analyzer.tokenStream("foo", new StringReader(s));
+      TokenStream ts = analyzer.tokenStream("foo", s);
       ts.reset();
       OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
       while (ts.incrementToken()) {

Modified: lucene/dev/trunk/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java (original)
+++ lucene/dev/trunk/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java Mon Jul  8 17:55:48 2013
@@ -249,7 +249,7 @@ public class TestICUTokenizer extends Ba
   }
   
   public void testTokenAttributes() throws Exception {
-    TokenStream ts = a.tokenStream("dummy", new StringReader("This is a test"));
+    TokenStream ts = a.tokenStream("dummy", "This is a test");
     ScriptAttribute scriptAtt = ts.addAttribute(ScriptAttribute.class);
     ts.reset();
     while (ts.incrementToken()) {

Modified: lucene/dev/trunk/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestExtendedMode.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestExtendedMode.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestExtendedMode.java (original)
+++ lucene/dev/trunk/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestExtendedMode.java Mon Jul  8 17:55:48 2013
@@ -19,7 +19,6 @@ package org.apache.lucene.analysis.ja;
 
 import java.io.IOException;
 import java.io.Reader;
-import java.io.StringReader;
 import java.util.Random;
 
 import org.apache.lucene.analysis.Analyzer;
@@ -54,7 +53,7 @@ public class TestExtendedMode extends Ba
     int numIterations = atLeast(1000);
     for (int i = 0; i < numIterations; i++) {
       String s = _TestUtil.randomUnicodeString(random(), 100);
-      TokenStream ts = analyzer.tokenStream("foo", new StringReader(s));
+      TokenStream ts = analyzer.tokenStream("foo", s);
       CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
       ts.reset();
       while (ts.incrementToken()) {

Modified: lucene/dev/trunk/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseAnalyzer.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseAnalyzer.java (original)
+++ lucene/dev/trunk/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseAnalyzer.java Mon Jul  8 17:55:48 2013
@@ -18,7 +18,6 @@ package org.apache.lucene.analysis.ja;
  */
 
 import java.io.IOException;
-import java.io.StringReader;
 import java.util.Random;
 
 import org.apache.lucene.analysis.Analyzer;
@@ -151,7 +150,7 @@ public class TestJapaneseAnalyzer extend
                                             Mode.SEARCH,
                                             JapaneseAnalyzer.getDefaultStopSet(),
                                             JapaneseAnalyzer.getDefaultStopTags());
-    assertTokenStreamContents(a.tokenStream("foo", new StringReader("abcd")),
+    assertTokenStreamContents(a.tokenStream("foo", "abcd"),
                               new String[] { "a", "b", "cd"  },
                               new int[] { 0, 1, 2 },
                               new int[] { 1, 2, 4 },

Modified: lucene/dev/trunk/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseTokenizer.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseTokenizer.java (original)
+++ lucene/dev/trunk/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseTokenizer.java Mon Jul  8 17:55:48 2013
@@ -22,7 +22,6 @@ import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.io.LineNumberReader;
 import java.io.Reader;
-import java.io.StringReader;
 import java.util.Random;
 
 import org.apache.lucene.analysis.Analyzer;
@@ -142,7 +141,7 @@ public class TestJapaneseTokenizer exten
    * ideally the test would actually fail instead of hanging...
    */
   public void testDecomposition5() throws Exception {
-    TokenStream ts = analyzer.tokenStream("bogus", new StringReader("くよくよくよくよくよくよくよくよくよくよくよくよくよくよくよくよくよくよくよくよ"));
+    TokenStream ts = analyzer.tokenStream("bogus", "くよくよくよくよくよくよくよくよくよくよくよくよくよくよくよくよくよくよくよくよ");
     ts.reset();
     while (ts.incrementToken()) {
       
@@ -166,8 +165,8 @@ public class TestJapaneseTokenizer exten
   /** Tests that sentence offset is incorporated into the resulting offsets */
   public void testTwoSentences() throws Exception {
     /*
-    //TokenStream ts = a.tokenStream("foo", new StringReader("妹の咲子です。俺と年子で、今受験生です。"));
-    TokenStream ts = analyzer.tokenStream("foo", new StringReader("&#x250cdf66<!--\"<!--#<!--;?><!--#<!--#><!---->?>-->;"));
+    //TokenStream ts = a.tokenStream("foo", "妹の咲子です。俺と年子で、今受験生です。");
+    TokenStream ts = analyzer.tokenStream("foo", "&#x250cdf66<!--\"<!--#<!--;?><!--#<!--#><!---->?>-->;");
     ts.reset();
     CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
     while(ts.incrementToken()) {
@@ -214,7 +213,7 @@ public class TestJapaneseTokenizer exten
   public void testLargeDocReliability() throws Exception {
     for (int i = 0; i < 100; i++) {
       String s = _TestUtil.randomUnicodeString(random(), 10000);
-      TokenStream ts = analyzer.tokenStream("foo", new StringReader(s));
+      TokenStream ts = analyzer.tokenStream("foo", s);
       ts.reset();
       while (ts.incrementToken()) {
       }
@@ -235,7 +234,7 @@ public class TestJapaneseTokenizer exten
         System.out.println("\nTEST: iter=" + i);
       }
       String s = _TestUtil.randomUnicodeString(random(), 100);
-      TokenStream ts = analyzer.tokenStream("foo", new StringReader(s));
+      TokenStream ts = analyzer.tokenStream("foo", s);
       CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
       ts.reset();
       while (ts.incrementToken()) {
@@ -245,14 +244,14 @@ public class TestJapaneseTokenizer exten
   }
 
   public void testOnlyPunctuation() throws IOException {
-    TokenStream ts = analyzerNoPunct.tokenStream("foo", new StringReader("。、。。"));
+    TokenStream ts = analyzerNoPunct.tokenStream("foo", "。、。。");
     ts.reset();
     assertFalse(ts.incrementToken());
     ts.end();
   }
 
   public void testOnlyPunctuationExtended() throws IOException {
-    TokenStream ts = extendedModeAnalyzerNoPunct.tokenStream("foo", new StringReader("......"));
+    TokenStream ts = extendedModeAnalyzerNoPunct.tokenStream("foo", "......");
     ts.reset();
     assertFalse(ts.incrementToken());
     ts.end();
@@ -261,14 +260,14 @@ public class TestJapaneseTokenizer exten
   // note: test is kinda silly since kuromoji emits punctuation tokens.
   // but, when/if we filter these out it will be useful.
   public void testEnd() throws Exception {
-    assertTokenStreamContents(analyzerNoPunct.tokenStream("foo", new StringReader("これは本ではない")),
+    assertTokenStreamContents(analyzerNoPunct.tokenStream("foo", "これは本ではない"),
         new String[] { "これ", "は", "本", "で", "は", "ない" },
         new int[] { 0, 2, 3, 4, 5, 6 },
         new int[] { 2, 3, 4, 5, 6, 8 },
         new Integer(8)
     );
 
-    assertTokenStreamContents(analyzerNoPunct.tokenStream("foo", new StringReader("これは本ではない    ")),
+    assertTokenStreamContents(analyzerNoPunct.tokenStream("foo", "これは本ではない    "),
         new String[] { "これ", "は", "本", "で", "は", "ない"  },
         new int[] { 0, 2, 3, 4, 5, 6, 8 },
         new int[] { 2, 3, 4, 5, 6, 8, 9 },
@@ -279,7 +278,7 @@ public class TestJapaneseTokenizer exten
   public void testUserDict() throws Exception {
     // Not a great test because w/o userdict.txt the
     // segmentation is the same:
-    assertTokenStreamContents(analyzer.tokenStream("foo", new StringReader("関西国際空港に行った")),
+    assertTokenStreamContents(analyzer.tokenStream("foo", "関西国際空港に行った"),
                               new String[] { "関西", "国際", "空港", "に", "行っ", "た"  },
                               new int[] { 0, 2, 4, 6, 7, 9 },
                               new int[] { 2, 4, 6, 7, 9, 10 },
@@ -289,7 +288,7 @@ public class TestJapaneseTokenizer exten
 
   public void testUserDict2() throws Exception {
     // Better test: w/o userdict the segmentation is different:
-    assertTokenStreamContents(analyzer.tokenStream("foo", new StringReader("朝青龍")),
+    assertTokenStreamContents(analyzer.tokenStream("foo", "朝青龍"),
                               new String[] { "朝青龍"  },
                               new int[] { 0 },
                               new int[] { 3 },
@@ -299,7 +298,7 @@ public class TestJapaneseTokenizer exten
 
   public void testUserDict3() throws Exception {
     // Test entry that breaks into multiple tokens:
-    assertTokenStreamContents(analyzer.tokenStream("foo", new StringReader("abcd")),
+    assertTokenStreamContents(analyzer.tokenStream("foo", "abcd"),
                               new String[] { "a", "b", "cd"  },
                               new int[] { 0, 1, 2 },
                               new int[] { 1, 2, 4 },
@@ -315,7 +314,7 @@ public class TestJapaneseTokenizer exten
   /*
   public void testUserDict4() throws Exception {
     // Test entry that has another entry as prefix
-    assertTokenStreamContents(analyzer.tokenStream("foo", new StringReader("abcdefghij")),
+    assertTokenStreamContents(analyzer.tokenStream("foo", "abcdefghij"),
                               new String[] { "ab", "cd", "efg", "hij"  },
                               new int[] { 0, 2, 4, 7 },
                               new int[] { 2, 4, 7, 10 },
@@ -366,7 +365,7 @@ public class TestJapaneseTokenizer exten
   }
 
   private void assertReadings(String input, String... readings) throws IOException {
-    TokenStream ts = analyzer.tokenStream("ignored", new StringReader(input));
+    TokenStream ts = analyzer.tokenStream("ignored", input);
     ReadingAttribute readingAtt = ts.addAttribute(ReadingAttribute.class);
     ts.reset();
     for(String reading : readings) {
@@ -378,7 +377,7 @@ public class TestJapaneseTokenizer exten
   }
 
   private void assertPronunciations(String input, String... pronunciations) throws IOException {
-    TokenStream ts = analyzer.tokenStream("ignored", new StringReader(input));
+    TokenStream ts = analyzer.tokenStream("ignored", input);
     ReadingAttribute readingAtt = ts.addAttribute(ReadingAttribute.class);
     ts.reset();
     for(String pronunciation : pronunciations) {
@@ -390,7 +389,7 @@ public class TestJapaneseTokenizer exten
   }
   
   private void assertBaseForms(String input, String... baseForms) throws IOException {
-    TokenStream ts = analyzer.tokenStream("ignored", new StringReader(input));
+    TokenStream ts = analyzer.tokenStream("ignored", input);
     BaseFormAttribute baseFormAtt = ts.addAttribute(BaseFormAttribute.class);
     ts.reset();
     for(String baseForm : baseForms) {
@@ -402,7 +401,7 @@ public class TestJapaneseTokenizer exten
   }
 
   private void assertInflectionTypes(String input, String... inflectionTypes) throws IOException {
-    TokenStream ts = analyzer.tokenStream("ignored", new StringReader(input));
+    TokenStream ts = analyzer.tokenStream("ignored", input);
     InflectionAttribute inflectionAtt = ts.addAttribute(InflectionAttribute.class);
     ts.reset();
     for(String inflectionType : inflectionTypes) {
@@ -414,7 +413,7 @@ public class TestJapaneseTokenizer exten
   }
 
   private void assertInflectionForms(String input, String... inflectionForms) throws IOException {
-    TokenStream ts = analyzer.tokenStream("ignored", new StringReader(input));
+    TokenStream ts = analyzer.tokenStream("ignored", input);
     InflectionAttribute inflectionAtt = ts.addAttribute(InflectionAttribute.class);
     ts.reset();
     for(String inflectionForm : inflectionForms) {
@@ -426,7 +425,7 @@ public class TestJapaneseTokenizer exten
   }
   
   private void assertPartsOfSpeech(String input, String... partsOfSpeech) throws IOException {
-    TokenStream ts = analyzer.tokenStream("ignored", new StringReader(input));
+    TokenStream ts = analyzer.tokenStream("ignored", input);
     PartOfSpeechAttribute partOfSpeechAtt = ts.addAttribute(PartOfSpeechAttribute.class);
     ts.reset();
     for(String partOfSpeech : partsOfSpeech) {
@@ -619,7 +618,7 @@ public class TestJapaneseTokenizer exten
     if (numIterations > 1) {
       // warmup
       for (int i = 0; i < numIterations; i++) {
-        final TokenStream ts = analyzer.tokenStream("ignored", new StringReader(line));
+        final TokenStream ts = analyzer.tokenStream("ignored", line);
         ts.reset();
         while(ts.incrementToken());
       }
@@ -628,7 +627,7 @@ public class TestJapaneseTokenizer exten
 
     long totalStart = System.currentTimeMillis();
     for (int i = 0; i < numIterations; i++) {
-      final TokenStream ts = analyzer.tokenStream("ignored", new StringReader(line));
+      final TokenStream ts = analyzer.tokenStream("ignored", line);
       ts.reset();
       while(ts.incrementToken());
     }
@@ -640,7 +639,7 @@ public class TestJapaneseTokenizer exten
     totalStart = System.currentTimeMillis();
     for (int i = 0; i < numIterations; i++) {
       for (String sentence: sentences) {
-        final TokenStream ts = analyzer.tokenStream("ignored", new StringReader(sentence));
+        final TokenStream ts = analyzer.tokenStream("ignored", sentence);
         ts.reset();
         while(ts.incrementToken());
       }

Modified: lucene/dev/trunk/lucene/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikAnalyzer.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikAnalyzer.java (original)
+++ lucene/dev/trunk/lucene/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikAnalyzer.java Mon Jul  8 17:55:48 2013
@@ -19,7 +19,6 @@ package org.apache.lucene.analysis.morfo
 
 import java.io.IOException;
 import java.io.Reader;
-import java.io.StringReader;
 import java.util.TreeSet;
 
 import org.apache.lucene.analysis.Analyzer;
@@ -73,7 +72,7 @@ public class TestMorfologikAnalyzer exte
 
   @SuppressWarnings("unused")
   private void dumpTokens(String input) throws IOException {
-    TokenStream ts = getTestAnalyzer().tokenStream("dummy", new StringReader(input));
+    TokenStream ts = getTestAnalyzer().tokenStream("dummy", input);
     ts.reset();
 
     MorphosyntacticTagsAttribute attribute = ts.getAttribute(MorphosyntacticTagsAttribute.class);
@@ -86,7 +85,7 @@ public class TestMorfologikAnalyzer exte
   /** Test reuse of MorfologikFilter with leftover stems. */
   public final void testLeftoverStems() throws IOException {
     Analyzer a = getTestAnalyzer();
-    TokenStream ts_1 = a.tokenStream("dummy", new StringReader("liście"));
+    TokenStream ts_1 = a.tokenStream("dummy", "liście");
     CharTermAttribute termAtt_1 = ts_1.getAttribute(CharTermAttribute.class);
     ts_1.reset();
     ts_1.incrementToken();
@@ -94,7 +93,7 @@ public class TestMorfologikAnalyzer exte
     ts_1.end();
     ts_1.close();
 
-    TokenStream ts_2 = a.tokenStream("dummy", new StringReader("danych"));
+    TokenStream ts_2 = a.tokenStream("dummy", "danych");
     CharTermAttribute termAtt_2 = ts_2.getAttribute(CharTermAttribute.class);
     ts_2.reset();
     ts_2.incrementToken();
@@ -141,7 +140,7 @@ public class TestMorfologikAnalyzer exte
 
   /** Test morphosyntactic annotations. */
   public final void testPOSAttribute() throws IOException {
-    TokenStream ts = getTestAnalyzer().tokenStream("dummy", new StringReader("liście"));
+    TokenStream ts = getTestAnalyzer().tokenStream("dummy", "liście");
 
     ts.reset();
     assertPOSToken(ts, "liście",  

Modified: lucene/dev/trunk/lucene/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java (original)
+++ lucene/dev/trunk/lucene/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java Mon Jul  8 17:55:48 2013
@@ -19,7 +19,6 @@ package org.apache.lucene.analysis.cn.sm
 
 import java.io.IOException;
 import java.io.Reader;
-import java.io.StringReader;
 import java.util.Random;
 
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
@@ -185,7 +184,7 @@ public class TestSmartChineseAnalyzer ex
       sb.append("我购买了道具和服装。");
     }
     Analyzer analyzer = new SmartChineseAnalyzer(TEST_VERSION_CURRENT);
-    TokenStream stream = analyzer.tokenStream("", new StringReader(sb.toString()));
+    TokenStream stream = analyzer.tokenStream("", sb.toString());
     stream.reset();
     while (stream.incrementToken()) {
     }
@@ -198,7 +197,7 @@ public class TestSmartChineseAnalyzer ex
       sb.append("我购买了道具和服装");
     }
     Analyzer analyzer = new SmartChineseAnalyzer(TEST_VERSION_CURRENT);
-    TokenStream stream = analyzer.tokenStream("", new StringReader(sb.toString()));
+    TokenStream stream = analyzer.tokenStream("", sb.toString());
     stream.reset();
     while (stream.incrementToken()) {
     }

Modified: lucene/dev/trunk/lucene/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMABaseAnalyzerTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMABaseAnalyzerTest.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMABaseAnalyzerTest.java (original)
+++ lucene/dev/trunk/lucene/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMABaseAnalyzerTest.java Mon Jul  8 17:55:48 2013
@@ -35,7 +35,6 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
-import java.io.StringReader;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -62,7 +61,7 @@ public class UIMABaseAnalyzerTest extend
 
   @Test
   public void baseUIMAAnalyzerStreamTest() throws Exception {
-    TokenStream ts = analyzer.tokenStream("text", new StringReader("the big brown fox jumped on the wood"));
+    TokenStream ts = analyzer.tokenStream("text", "the big brown fox jumped on the wood");
     assertTokenStreamContents(ts, new String[]{"the", "big", "brown", "fox", "jumped", "on", "the", "wood"});
   }
 

Modified: lucene/dev/trunk/lucene/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMATypeAwareAnalyzerTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMATypeAwareAnalyzerTest.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMATypeAwareAnalyzerTest.java (original)
+++ lucene/dev/trunk/lucene/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMATypeAwareAnalyzerTest.java Mon Jul  8 17:55:48 2013
@@ -23,8 +23,6 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
-import java.io.StringReader;
-
 /**
  * Testcase for {@link UIMATypeAwareAnalyzer}
  */
@@ -51,7 +49,7 @@ public class UIMATypeAwareAnalyzerTest e
   public void baseUIMATypeAwareAnalyzerStreamTest() throws Exception {
 
     // create a token stream
-    TokenStream ts = analyzer.tokenStream("text", new StringReader("the big brown fox jumped on the wood"));
+    TokenStream ts = analyzer.tokenStream("text", "the big brown fox jumped on the wood");
 
     // check that 'the big brown fox jumped on the wood' tokens have the expected PoS types
     assertTokenStreamContents(ts,

Modified: lucene/dev/trunk/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java (original)
+++ lucene/dev/trunk/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java Mon Jul  8 17:55:48 2013
@@ -21,7 +21,6 @@ import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.InputStreamReader;
-import java.io.StringReader;
 import java.text.Collator;
 import java.util.List;
 import java.util.Locale;
@@ -979,8 +978,8 @@ public class TestPerfTasksLogic extends 
   
   private void assertEqualCollation(Analyzer a1, Analyzer a2, String text)
       throws Exception {
-    TokenStream ts1 = a1.tokenStream("bogus", new StringReader(text));
-    TokenStream ts2 = a2.tokenStream("bogus", new StringReader(text));
+    TokenStream ts1 = a1.tokenStream("bogus", text);
+    TokenStream ts2 = a2.tokenStream("bogus", text);
     ts1.reset();
     ts2.reset();
     TermToBytesRefAttribute termAtt1 = ts1.addAttribute(TermToBytesRefAttribute.class);
@@ -1030,7 +1029,7 @@ public class TestPerfTasksLogic extends 
     Benchmark benchmark = execBenchmark(getAnalyzerFactoryConfig
         ("shingle-analyzer", "StandardTokenizer,ShingleFilter"));
     benchmark.getRunData().getAnalyzer().tokenStream
-        ("bogus", new StringReader(text)).close();
+        ("bogus", text).close();
     BaseTokenStreamTestCase.assertAnalyzesTo(benchmark.getRunData().getAnalyzer(), text,
                                              new String[] { "one", "one two", "two", "two three",
                                                             "three", "three four", "four", "four five",

Modified: lucene/dev/trunk/lucene/classification/src/java/org/apache/lucene/classification/SimpleNaiveBayesClassifier.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/classification/src/java/org/apache/lucene/classification/SimpleNaiveBayesClassifier.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/classification/src/java/org/apache/lucene/classification/SimpleNaiveBayesClassifier.java (original)
+++ lucene/dev/trunk/lucene/classification/src/java/org/apache/lucene/classification/SimpleNaiveBayesClassifier.java Mon Jul  8 17:55:48 2013
@@ -33,7 +33,6 @@ import org.apache.lucene.search.Wildcard
 import org.apache.lucene.util.BytesRef;
 
 import java.io.IOException;
-import java.io.StringReader;
 import java.util.Collection;
 import java.util.LinkedList;
 
@@ -86,7 +85,7 @@ public class SimpleNaiveBayesClassifier 
 
   private String[] tokenizeDoc(String doc) throws IOException {
     Collection<String> result = new LinkedList<String>();
-    TokenStream tokenStream = analyzer.tokenStream(textFieldName, new StringReader(doc));
+    TokenStream tokenStream = analyzer.tokenStream(textFieldName, doc);
     CharTermAttribute charTermAttribute = tokenStream.addAttribute(CharTermAttribute.class);
     tokenStream.reset();
     while (tokenStream.incrementToken()) {

Modified: lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/analysis/Analyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/analysis/Analyzer.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/analysis/Analyzer.java (original)
+++ lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/analysis/Analyzer.java Mon Jul  8 17:55:48 2013
@@ -120,11 +120,16 @@ public abstract class Analyzer implement
    * See the {@link org.apache.lucene.analysis Analysis package documentation} for
    * some examples demonstrating this.
    * 
+   * <b>NOTE:</b> If your data is available as a {@code String}, use
+   * {@link #tokenStream(String, String)} which reuses a {@code StringReader}-like
+   * instance internally.
+   * 
    * @param fieldName the name of the field the created TokenStream is used for
   * @param reader the reader the stream's source reads from
    * @return TokenStream for iterating the analyzed content of <code>reader</code>
    * @throws AlreadyClosedException if the Analyzer is closed.
    * @throws IOException if an i/o error occurs.
+   * @see #tokenStream(String, String)
    */
   public final TokenStream tokenStream(final String fieldName,
                                        final Reader reader) throws IOException {
@@ -140,6 +145,45 @@ public abstract class Analyzer implement
   }
   
   /**
+   * Returns a TokenStream suitable for <code>fieldName</code>, tokenizing
+   * the contents of <code>text</code>.
+   * <p>
+   * This method uses {@link #createComponents(String, Reader)} to obtain an
+   * instance of {@link TokenStreamComponents}. It returns the sink of the
+   * components and stores the components internally. Subsequent calls to this
+   * method will reuse the previously stored components after resetting them
+   * through {@link TokenStreamComponents#setReader(Reader)}.
+   * <p>
+   * <b>NOTE:</b> After calling this method, the consumer must follow the 
+   * workflow described in {@link TokenStream} to properly consume its contents.
+   * See the {@link org.apache.lucene.analysis Analysis package documentation} for
+   * some examples demonstrating this.
+   * 
+   * @param fieldName the name of the field the created TokenStream is used for
+   * @param text the String the stream's source reads from
+   * @return TokenStream for iterating the analyzed content of <code>text</code>
+   * @throws AlreadyClosedException if the Analyzer is closed.
+   * @throws IOException if an i/o error occurs (may rarely happen for strings).
+   * @see #tokenStream(String, Reader)
+   */
+  public final TokenStream tokenStream(final String fieldName, final String text) throws IOException {
+    TokenStreamComponents components = reuseStrategy.getReusableComponents(fieldName);
+    @SuppressWarnings("resource") final ReusableStringReader strReader = 
+        (components == null || components.reusableStringReader == null) ?
+        new ReusableStringReader() : components.reusableStringReader;
+    strReader.setValue(text);
+    final Reader r = initReader(fieldName, strReader);
+    if (components == null) {
+      components = createComponents(fieldName, r);
+      reuseStrategy.setReusableComponents(fieldName, components);
+    } else {
+      components.setReader(r);
+    }
+    components.reusableStringReader = strReader;
+    return components.getTokenStream();
+  }
+    
+  /**
    * Override this if you want to add a CharFilter chain.
    * <p>
    * The default implementation returns <code>reader</code>
@@ -208,6 +252,9 @@ public abstract class Analyzer implement
      * the chain. This can be the source if there are no filters.
      */
     protected final TokenStream sink;
+    
+    /** Internal cache only used by {@link Analyzer#tokenStream(String, String)}. */
+    transient ReusableStringReader reusableStringReader;
 
     /**
      * Creates a new {@link TokenStreamComponents} instance.

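For reference, a minimal consumer sketch of the new overload documented above (the Analyzer instance, field name, and text are placeholders; the steps follow the standard TokenStream workflow the Javadoc points to, the same pattern the call sites in this commit use):

    TokenStream ts = analyzer.tokenStream("body", "some text to analyze");
    CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
    try {
      ts.reset();                     // mandatory before incrementToken()
      while (ts.incrementToken()) {
        System.out.println(termAtt.toString());
      }
      ts.end();                       // finalize offsets/state
    } finally {
      ts.close();                     // lets the cached components be reused
    }
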
Added: lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/analysis/ReusableStringReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/analysis/ReusableStringReader.java?rev=1500862&view=auto
==============================================================================
--- lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/analysis/ReusableStringReader.java (added)
+++ lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/analysis/ReusableStringReader.java Mon Jul  8 17:55:48 2013
@@ -0,0 +1,61 @@
+package org.apache.lucene.analysis;
+
+import java.io.Reader;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** Internal class to enable reuse of the string reader by {@link Analyzer#tokenStream(String,String)} */
+final class ReusableStringReader extends Reader {
+  private int pos = 0, size = 0;
+  private String s = null;
+  
+  void setValue(String s) {
+    this.s = s;
+    this.size = s.length();
+    this.pos = 0;
+  }
+  
+  @Override
+  public int read() {
+    if (pos < size) {
+      return s.charAt(pos++);
+    } else {
+      s = null;
+      return -1;
+    }
+  }
+  
+  @Override
+  public int read(char[] c, int off, int len) {
+    if (pos < size) {
+      len = Math.min(len, size-pos);
+      s.getChars(pos, pos+len, c, off);
+      pos += len;
+      return len;
+    } else {
+      s = null;
+      return -1;
+    }
+  }
+  
+  @Override
+  public void close() {
+    pos = size; // this prevents NPE when reading after close!
+    s = null;
+  }
+}

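The new class exists purely for allocation-free reuse: setValue(String) rewinds the same instance instead of constructing a new StringReader per call. A sketch of the pattern (illustrative only: the class is package-private, and in practice Analyzer caches a single instance on TokenStreamComponents as shown above; the loop and the 'values' collection here are hypothetical):

    ReusableStringReader r = new ReusableStringReader();
    for (String value : values) {
      r.setValue(value);              // rewinds pos, reuses the instance
      int ch;
      while ((ch = r.read()) != -1) { // read() nulls the String at EOF,
        /* consume ch */              // releasing it for GC
      }
      r.close();                      // safe to call: pos = size, s = null
    }
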
Modified: lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/document/Field.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/document/Field.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/document/Field.java (original)
+++ lucene/dev/trunk/lucene/core/src/java/org/apache/lucene/document/Field.java Mon Jul  8 17:55:48 2013
@@ -75,7 +75,6 @@ public class Field implements IndexableF
   protected TokenStream tokenStream;
 
   private transient TokenStream internalTokenStream;
-  private transient ReusableStringReader internalReader;
 
   /**
    * Field's boost
@@ -552,56 +551,12 @@ public class Field implements IndexableF
     } else if (readerValue() != null) {
       return analyzer.tokenStream(name(), readerValue());
     } else if (stringValue() != null) {
-      if (internalReader == null) {
-        internalReader = new ReusableStringReader();
-      }
-      internalReader.setValue(stringValue());
-      return analyzer.tokenStream(name(), internalReader);
+      return analyzer.tokenStream(name(), stringValue());
     }
 
     throw new IllegalArgumentException("Field must have either TokenStream, String, Reader or Number value");
   }
   
-  static final class ReusableStringReader extends Reader {
-    private int pos = 0, size = 0;
-    private String s = null;
-    
-    void setValue(String s) {
-      this.s = s;
-      this.size = s.length();
-      this.pos = 0;
-    }
-    
-    @Override
-    public int read() {
-      if (pos < size) {
-        return s.charAt(pos++);
-      } else {
-        s = null;
-        return -1;
-      }
-    }
-    
-    @Override
-    public int read(char[] c, int off, int len) {
-      if (pos < size) {
-        len = Math.min(len, size-pos);
-        s.getChars(pos, pos+len, c, off);
-        pos += len;
-        return len;
-      } else {
-        s = null;
-        return -1;
-      }
-    }
-    
-    @Override
-    public void close() {
-      pos = size; // this prevents NPE when reading after close!
-      s = null;
-    }
-  }
-  
   static final class StringTokenStream extends TokenStream {
     private final CharTermAttribute termAttribute = addAttribute(CharTermAttribute.class);
     private final OffsetAttribute offsetAttribute = addAttribute(OffsetAttribute.class);

Modified: lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java (original)
+++ lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java Mon Jul  8 17:55:48 2013
@@ -96,7 +96,7 @@ public class TestMockAnalyzer extends Ba
     String testString = "t";
     
     Analyzer analyzer = new MockAnalyzer(random());
-    TokenStream stream = analyzer.tokenStream("dummy", new StringReader(testString));
+    TokenStream stream = analyzer.tokenStream("dummy", testString);
     stream.reset();
     while (stream.incrementToken()) {
       // consume

Added: lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/analysis/TestReusableStringReader.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/analysis/TestReusableStringReader.java?rev=1500862&view=auto
==============================================================================
--- lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/analysis/TestReusableStringReader.java (added)
+++ lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/analysis/TestReusableStringReader.java Mon Jul  8 17:55:48 2013
@@ -0,0 +1,61 @@
+package org.apache.lucene.analysis;
+
+import java.nio.CharBuffer;
+
+import org.apache.lucene.util.LuceneTestCase;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+public class TestReusableStringReader extends LuceneTestCase {
+  
+  public void test() throws Exception {
+    ReusableStringReader reader = new ReusableStringReader();
+    assertEquals(-1, reader.read());
+    assertEquals(-1, reader.read(new char[1]));
+    assertEquals(-1, reader.read(new char[2], 1, 1));
+    assertEquals(-1, reader.read(CharBuffer.wrap(new char[2])));
+    
+    reader.setValue("foobar");
+    char[] buf = new char[4];
+    assertEquals(4, reader.read(buf));
+    assertEquals("foob", new String(buf));
+    assertEquals(2, reader.read(buf));
+    assertEquals("ar", new String(buf, 0, 2));
+    assertEquals(-1, reader.read(buf));
+    reader.close();
+
+    reader.setValue("foobar");
+    assertEquals(0, reader.read(buf, 1, 0));
+    assertEquals(3, reader.read(buf, 1, 3));
+    assertEquals("foo", new String(buf, 1, 3));
+    assertEquals(2, reader.read(CharBuffer.wrap(buf, 2, 2)));
+    assertEquals("ba", new String(buf, 2, 2));
+    assertEquals('r', (char) reader.read());
+    assertEquals(-1, reader.read(buf));
+    reader.close();
+
+    reader.setValue("foobar");
+    StringBuilder sb = new StringBuilder();
+    int ch;
+    while ((ch = reader.read()) != -1) {
+      sb.append((char) ch);
+    }
+    reader.close();
+    assertEquals("foobar", sb.toString());    
+  }
+  
+}

Modified: lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/document/TestField.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/document/TestField.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/document/TestField.java (original)
+++ lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/document/TestField.java Mon Jul  8 17:55:48 2013
@@ -18,11 +18,8 @@ package org.apache.lucene.document;
  */
 
 import java.io.StringReader;
-import java.nio.CharBuffer;
-
 import org.apache.lucene.analysis.CannedTokenStream;
 import org.apache.lucene.analysis.Token;
-import org.apache.lucene.document.Field.ReusableStringReader;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 
@@ -518,39 +515,4 @@ public class TestField extends LuceneTes
     }
   }
   
-  public void testReusableStringReader() throws Exception {
-    ReusableStringReader reader = new ReusableStringReader();
-    assertEquals(-1, reader.read());
-    assertEquals(-1, reader.read(new char[1]));
-    assertEquals(-1, reader.read(new char[2], 1, 1));
-    assertEquals(-1, reader.read(CharBuffer.wrap(new char[2])));
-    
-    reader.setValue("foobar");
-    char[] buf = new char[4];
-    assertEquals(4, reader.read(buf));
-    assertEquals("foob", new String(buf));
-    assertEquals(2, reader.read(buf));
-    assertEquals("ar", new String(buf, 0, 2));
-    assertEquals(-1, reader.read(buf));
-    reader.close();
-
-    reader.setValue("foobar");
-    assertEquals(0, reader.read(buf, 1, 0));
-    assertEquals(3, reader.read(buf, 1, 3));
-    assertEquals("foo", new String(buf, 1, 3));
-    assertEquals(2, reader.read(CharBuffer.wrap(buf, 2, 2)));
-    assertEquals("ba", new String(buf, 2, 2));
-    assertEquals('r', (char) reader.read());
-    assertEquals(-1, reader.read(buf));
-    reader.close();
-
-    reader.setValue("foobar");
-    StringBuilder sb = new StringBuilder();
-    int ch;
-    while ((ch = reader.read()) != -1) {
-      sb.append((char) ch);
-    }
-    reader.close();
-    assertEquals("foobar", sb.toString());    
-  }
 }

Modified: lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java (original)
+++ lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java Mon Jul  8 17:55:48 2013
@@ -18,8 +18,6 @@ package org.apache.lucene.index;
  */
 
 import java.io.IOException;
-import java.io.StringReader;
-
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.TokenStream;
@@ -49,7 +47,7 @@ public class TestLongPostings extends Lu
       if (other != null && s.equals(other)) {
         continue;
       }
-      final TokenStream ts = a.tokenStream("foo", new StringReader(s));
+      final TokenStream ts = a.tokenStream("foo", s);
       final TermToBytesRefAttribute termAtt = ts.getAttribute(TermToBytesRefAttribute.class);
       final BytesRef termBytes = termAtt.getBytesRef();
       ts.reset();

Modified: lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java (original)
+++ lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java Mon Jul  8 17:55:48 2013
@@ -18,8 +18,6 @@ package org.apache.lucene.index;
  */
 
 import java.io.IOException;
-import java.io.StringReader;
-
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CachingTokenFilter;
 import org.apache.lucene.analysis.MockAnalyzer;
@@ -176,7 +174,7 @@ public class TestTermVectorsWriter exten
     Analyzer analyzer = new MockAnalyzer(random());
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer));
     Document doc = new Document();
-    TokenStream stream = analyzer.tokenStream("field", new StringReader("abcd   "));
+    TokenStream stream = analyzer.tokenStream("field", "abcd   ");
     stream.reset(); // TODO: weird to reset before wrapping with CachingTokenFilter... correct?
     stream = new CachingTokenFilter(stream);
     FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);

Modified: lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java (original)
+++ lucene/dev/trunk/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java Mon Jul  8 17:55:48 2013
@@ -617,7 +617,7 @@ public class TestPhraseQuery extends Luc
               break;
             }
           }
-          TokenStream ts = analyzer.tokenStream("ignore", new StringReader(term));
+          TokenStream ts = analyzer.tokenStream("ignore", term);
           CharTermAttribute termAttr = ts.addAttribute(CharTermAttribute.class);
           ts.reset();
           while(ts.incrementToken()) {

Modified: lucene/dev/trunk/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java (original)
+++ lucene/dev/trunk/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java Mon Jul  8 17:55:48 2013
@@ -17,7 +17,6 @@ package org.apache.lucene.search.highlig
  */
 
 import java.io.IOException;
-import java.io.StringReader;
 import java.util.ArrayList;
 import java.util.Iterator;
 
@@ -25,7 +24,6 @@ import org.apache.lucene.analysis.Analyz
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.util.PriorityQueue;
 
 /**
@@ -78,7 +76,7 @@ public class Highlighter
   public final String getBestFragment(Analyzer analyzer, String fieldName, String text)
     throws IOException, InvalidTokenOffsetsException
   {
-    TokenStream tokenStream = analyzer.tokenStream(fieldName, new StringReader(text));
+    TokenStream tokenStream = analyzer.tokenStream(fieldName, text);
     return getBestFragment(tokenStream, text);
   }
 
@@ -130,7 +128,7 @@ public class Highlighter
     int maxNumFragments)
     throws IOException, InvalidTokenOffsetsException
   {
-    TokenStream tokenStream = analyzer.tokenStream(fieldName, new StringReader(text));
+    TokenStream tokenStream = analyzer.tokenStream(fieldName, text);
     return getBestFragments(tokenStream, text, maxNumFragments);
   }
 

Modified: lucene/dev/trunk/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java?rev=1500862&r1=1500861&r2=1500862&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java (original)
+++ lucene/dev/trunk/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java Mon Jul  8 17:55:48 2013
@@ -36,7 +36,6 @@ import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.BytesRef;
 
 import java.io.IOException;
-import java.io.StringReader;
 import java.util.ArrayList;
 import java.util.Comparator;
 
@@ -314,7 +313,7 @@ public class TokenSources {
   public static TokenStream getTokenStream(String field, String contents,
       Analyzer analyzer) {
     try {
-      return analyzer.tokenStream(field, new StringReader(contents));
+      return analyzer.tokenStream(field, contents);
     } catch (IOException ex) {
       throw new RuntimeException(ex);
     }