You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by rm...@apache.org on 2013/10/07 09:54:33 UTC
svn commit: r1529780 [1/2] - in /lucene/dev/branches/branch_4x: ./ lucene/
lucene/analysis/
lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/
lucene/analysis/common/src/test/org/apache/lucene/analysis/core/
lucene/analysis/common/src/...
Author: rmuir
Date: Mon Oct 7 07:54:32 2013
New Revision: 1529780
URL: http://svn.apache.org/r1529780
Log:
LUCENE-5259: convert analysis consumers to try-with-resources (backport exception fixes)
Modified:
lucene/dev/branches/branch_4x/ (props changed)
lucene/dev/branches/branch_4x/lucene/ (props changed)
lucene/dev/branches/branch_4x/lucene/analysis/ (props changed)
lucene/dev/branches/branch_4x/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java
lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java
lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopAnalyzer.java
lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPerFieldAnalyzerWrapper.java
lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java
lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java
lucene/dev/branches/branch_4x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java
lucene/dev/branches/branch_4x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestExtendedMode.java
lucene/dev/branches/branch_4x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseTokenizer.java
lucene/dev/branches/branch_4x/lucene/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikAnalyzer.java
lucene/dev/branches/branch_4x/lucene/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java
lucene/dev/branches/branch_4x/lucene/classification/ (props changed)
lucene/dev/branches/branch_4x/lucene/classification/src/ (props changed)
lucene/dev/branches/branch_4x/lucene/classification/src/java/org/apache/lucene/classification/SimpleNaiveBayesClassifier.java
lucene/dev/branches/branch_4x/lucene/core/ (props changed)
lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java
lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java
lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java
lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java
lucene/dev/branches/branch_4x/lucene/highlighter/ (props changed)
lucene/dev/branches/branch_4x/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java
lucene/dev/branches/branch_4x/lucene/queries/ (props changed)
lucene/dev/branches/branch_4x/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java
lucene/dev/branches/branch_4x/lucene/queryparser/ (props changed)
lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/analyzing/AnalyzingQueryParser.java
lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserBase.java
lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AnalyzerQueryNodeProcessor.java
lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LikeThisQueryBuilder.java
lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrTermsBuilder.java
lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsFilterBuilder.java
lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsQueryBuilder.java
lucene/dev/branches/branch_4x/lucene/sandbox/ (props changed)
lucene/dev/branches/branch_4x/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java
lucene/dev/branches/branch_4x/lucene/suggest/ (props changed)
lucene/dev/branches/branch_4x/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggester.java
lucene/dev/branches/branch_4x/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingSuggester.java
lucene/dev/branches/branch_4x/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FreeTextSuggester.java
lucene/dev/branches/branch_4x/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggesterTest.java
lucene/dev/branches/branch_4x/lucene/test-framework/ (props changed)
lucene/dev/branches/branch_4x/lucene/test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java
lucene/dev/branches/branch_4x/solr/ (props changed)
lucene/dev/branches/branch_4x/solr/contrib/ (props changed)
lucene/dev/branches/branch_4x/solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java
lucene/dev/branches/branch_4x/solr/core/ (props changed)
lucene/dev/branches/branch_4x/solr/core/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java
lucene/dev/branches/branch_4x/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
lucene/dev/branches/branch_4x/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java
lucene/dev/branches/branch_4x/solr/core/src/java/org/apache/solr/parser/SolrQueryParserBase.java
lucene/dev/branches/branch_4x/solr/core/src/java/org/apache/solr/schema/CollationField.java
lucene/dev/branches/branch_4x/solr/core/src/java/org/apache/solr/schema/TextField.java
lucene/dev/branches/branch_4x/solr/core/src/test/org/apache/solr/spelling/SimpleQueryConverter.java
Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java?rev=1529780&r1=1529779&r2=1529780&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java Mon Oct 7 07:54:32 2013
@@ -34,6 +34,7 @@ import org.apache.lucene.store.ByteArray
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefHash;
import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.IntsRef;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util.fst.ByteSequenceOutputs;
@@ -307,30 +308,36 @@ public class SynonymMap {
* separates by {@link SynonymMap#WORD_SEPARATOR}.
* reuse and its chars must not be null. */
public CharsRef analyze(String text, CharsRef reuse) throws IOException {
+ IOException priorException = null;
TokenStream ts = analyzer.tokenStream("", text);
- CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
- PositionIncrementAttribute posIncAtt = ts.addAttribute(PositionIncrementAttribute.class);
- ts.reset();
- reuse.length = 0;
- while (ts.incrementToken()) {
- int length = termAtt.length();
- if (length == 0) {
- throw new IllegalArgumentException("term: " + text + " analyzed to a zero-length token");
- }
- if (posIncAtt.getPositionIncrement() != 1) {
- throw new IllegalArgumentException("term: " + text + " analyzed to a token with posinc != 1");
- }
- reuse.grow(reuse.length + length + 1); /* current + word + separator */
- int end = reuse.offset + reuse.length;
- if (reuse.length > 0) {
- reuse.chars[end++] = SynonymMap.WORD_SEPARATOR;
- reuse.length++;
+ try {
+ CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
+ PositionIncrementAttribute posIncAtt = ts.addAttribute(PositionIncrementAttribute.class);
+ ts.reset();
+ reuse.length = 0;
+ while (ts.incrementToken()) {
+ int length = termAtt.length();
+ if (length == 0) {
+ throw new IllegalArgumentException("term: " + text + " analyzed to a zero-length token");
+ }
+ if (posIncAtt.getPositionIncrement() != 1) {
+ throw new IllegalArgumentException("term: " + text + " analyzed to a token with posinc != 1");
+ }
+ reuse.grow(reuse.length + length + 1); /* current + word + separator */
+ int end = reuse.offset + reuse.length;
+ if (reuse.length > 0) {
+ reuse.chars[end++] = SynonymMap.WORD_SEPARATOR;
+ reuse.length++;
+ }
+ System.arraycopy(termAtt.buffer(), 0, reuse.chars, end, length);
+ reuse.length += length;
}
- System.arraycopy(termAtt.buffer(), 0, reuse.chars, end, length);
- reuse.length += length;
+ ts.end();
+ } catch (IOException e) {
+ priorException = e;
+ } finally {
+ IOUtils.closeWhileHandlingException(priorException, ts);
}
- ts.end();
- ts.close();
if (reuse.length == 0) {
throw new IllegalArgumentException("term: " + text + " was completely eliminated by analyzer");
}
Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java?rev=1529780&r1=1529779&r2=1529780&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java Mon Oct 7 07:54:32 2013
@@ -37,6 +37,7 @@ import org.apache.lucene.search.IndexSea
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util._TestUtil;
public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
@@ -118,11 +119,17 @@ public class TestKeywordAnalyzer extends
// LUCENE-1441
public void testOffsets() throws Exception {
TokenStream stream = new KeywordAnalyzer().tokenStream("field", new StringReader("abcd"));
- OffsetAttribute offsetAtt = stream.addAttribute(OffsetAttribute.class);
- stream.reset();
- assertTrue(stream.incrementToken());
- assertEquals(0, offsetAtt.startOffset());
- assertEquals(4, offsetAtt.endOffset());
+ try {
+ OffsetAttribute offsetAtt = stream.addAttribute(OffsetAttribute.class);
+ stream.reset();
+ assertTrue(stream.incrementToken());
+ assertEquals(0, offsetAtt.startOffset());
+ assertEquals(4, offsetAtt.endOffset());
+ assertFalse(stream.incrementToken());
+ stream.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(stream);
+ }
}
/** blast some random strings through the analyzer */
Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopAnalyzer.java?rev=1529780&r1=1529779&r2=1529780&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopAnalyzer.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopAnalyzer.java Mon Oct 7 07:54:32 2013
@@ -22,6 +22,7 @@ import org.apache.lucene.analysis.TokenS
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.util.CharArraySet;
+import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.Version;
import java.io.IOException;
@@ -47,12 +48,17 @@ public class TestStopAnalyzer extends Ba
public void testDefaults() throws IOException {
assertTrue(stop != null);
TokenStream stream = stop.tokenStream("test", "This is a test of the english stop analyzer");
- assertTrue(stream != null);
- CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
- stream.reset();
+ try {
+ assertTrue(stream != null);
+ CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
+ stream.reset();
- while (stream.incrementToken()) {
- assertFalse(inValidTokens.contains(termAtt.toString()));
+ while (stream.incrementToken()) {
+ assertFalse(inValidTokens.contains(termAtt.toString()));
+ }
+ stream.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(stream);
}
}
@@ -60,13 +66,18 @@ public class TestStopAnalyzer extends Ba
CharArraySet stopWordsSet = new CharArraySet(TEST_VERSION_CURRENT, asSet("good", "test", "analyzer"), false);
StopAnalyzer newStop = new StopAnalyzer(Version.LUCENE_40, stopWordsSet);
TokenStream stream = newStop.tokenStream("test", "This is a good test of the english stop analyzer");
- assertNotNull(stream);
- CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
+ try {
+ assertNotNull(stream);
+ CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
- stream.reset();
- while (stream.incrementToken()) {
- String text = termAtt.toString();
- assertFalse(stopWordsSet.contains(text));
+ stream.reset();
+ while (stream.incrementToken()) {
+ String text = termAtt.toString();
+ assertFalse(stopWordsSet.contains(text));
+ }
+ stream.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(stream);
}
}
@@ -76,16 +87,21 @@ public class TestStopAnalyzer extends Ba
String s = "This is a good test of the english stop analyzer with positions";
int expectedIncr[] = { 1, 1, 1, 3, 1, 1, 1, 2, 1};
TokenStream stream = newStop.tokenStream("test", s);
- assertNotNull(stream);
- int i = 0;
- CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
- PositionIncrementAttribute posIncrAtt = stream.addAttribute(PositionIncrementAttribute.class);
-
- stream.reset();
- while (stream.incrementToken()) {
- String text = termAtt.toString();
- assertFalse(stopWordsSet.contains(text));
- assertEquals(expectedIncr[i++],posIncrAtt.getPositionIncrement());
+ try {
+ assertNotNull(stream);
+ int i = 0;
+ CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
+ PositionIncrementAttribute posIncrAtt = stream.addAttribute(PositionIncrementAttribute.class);
+
+ stream.reset();
+ while (stream.incrementToken()) {
+ String text = termAtt.toString();
+ assertFalse(stopWordsSet.contains(text));
+ assertEquals(expectedIncr[i++],posIncrAtt.getPositionIncrement());
+ }
+ stream.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(stream);
}
}
Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPerFieldAnalyzerWrapper.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPerFieldAnalyzerWrapper.java?rev=1529780&r1=1529779&r2=1529780&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPerFieldAnalyzerWrapper.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPerFieldAnalyzerWrapper.java Mon Oct 7 07:54:32 2013
@@ -9,6 +9,7 @@ import org.apache.lucene.analysis.*;
import org.apache.lucene.analysis.core.SimpleAnalyzer;
import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.util.IOUtils;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
@@ -38,22 +39,34 @@ public class TestPerFieldAnalyzerWrapper
new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT), analyzerPerField);
TokenStream tokenStream = analyzer.tokenStream("field", text);
- CharTermAttribute termAtt = tokenStream.getAttribute(CharTermAttribute.class);
- tokenStream.reset();
+ try {
+ CharTermAttribute termAtt = tokenStream.getAttribute(CharTermAttribute.class);
+ tokenStream.reset();
- assertTrue(tokenStream.incrementToken());
- assertEquals("WhitespaceAnalyzer does not lowercase",
+ assertTrue(tokenStream.incrementToken());
+ assertEquals("WhitespaceAnalyzer does not lowercase",
"Qwerty",
termAtt.toString());
+ assertFalse(tokenStream.incrementToken());
+ tokenStream.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(tokenStream);
+ }
tokenStream = analyzer.tokenStream("special", text);
- termAtt = tokenStream.getAttribute(CharTermAttribute.class);
- tokenStream.reset();
+ try {
+ CharTermAttribute termAtt = tokenStream.getAttribute(CharTermAttribute.class);
+ tokenStream.reset();
- assertTrue(tokenStream.incrementToken());
- assertEquals("SimpleAnalyzer lowercases",
+ assertTrue(tokenStream.incrementToken());
+ assertEquals("SimpleAnalyzer lowercases",
"qwerty",
termAtt.toString());
+ assertFalse(tokenStream.incrementToken());
+ tokenStream.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(tokenStream);
+ }
}
public void testCharFilters() throws Exception {
Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java?rev=1529780&r1=1529779&r2=1529780&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java Mon Oct 7 07:54:32 2013
@@ -34,6 +34,7 @@ import org.apache.lucene.index.IndexWrit
import org.apache.lucene.index.Term;
import org.apache.lucene.search.*;
import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.IOUtils;
/**
* A test class for ShingleAnalyzerWrapper as regards queries and scoring.
@@ -96,16 +97,21 @@ public class ShingleAnalyzerWrapperTest
PhraseQuery q = new PhraseQuery();
TokenStream ts = analyzer.tokenStream("content", "this sentence");
- int j = -1;
+ try {
+ int j = -1;
- PositionIncrementAttribute posIncrAtt = ts.addAttribute(PositionIncrementAttribute.class);
- CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
+ PositionIncrementAttribute posIncrAtt = ts.addAttribute(PositionIncrementAttribute.class);
+ CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
- ts.reset();
- while (ts.incrementToken()) {
- j += posIncrAtt.getPositionIncrement();
- String termText = termAtt.toString();
- q.add(new Term("content", termText), j);
+ ts.reset();
+ while (ts.incrementToken()) {
+ j += posIncrAtt.getPositionIncrement();
+ String termText = termAtt.toString();
+ q.add(new Term("content", termText), j);
+ }
+ ts.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(ts);
}
ScoreDoc[] hits = searcher.search(q, null, 1000).scoreDocs;
@@ -122,15 +128,18 @@ public class ShingleAnalyzerWrapperTest
BooleanQuery q = new BooleanQuery();
TokenStream ts = analyzer.tokenStream("content", "test sentence");
+ try {
+ CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
- CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
-
- ts.reset();
-
- while (ts.incrementToken()) {
- String termText = termAtt.toString();
- q.add(new TermQuery(new Term("content", termText)),
+ ts.reset();
+ while (ts.incrementToken()) {
+ String termText = termAtt.toString();
+ q.add(new TermQuery(new Term("content", termText)),
BooleanClause.Occur.SHOULD);
+ }
+ ts.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(ts);
}
ScoreDoc[] hits = searcher.search(q, null, 1000).scoreDocs;
Modified: lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java?rev=1529780&r1=1529779&r2=1529780&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharTokenizers.java Mon Oct 7 07:54:32 2013
@@ -29,6 +29,7 @@ import org.apache.lucene.analysis.Tokeni
import org.apache.lucene.analysis.core.LetterTokenizer;
import org.apache.lucene.analysis.core.LowerCaseTokenizer;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util._TestUtil;
@@ -124,17 +125,20 @@ public class TestCharTokenizers extends
for (int i = 0; i < num; i++) {
String s = _TestUtil.randomUnicodeString(random());
TokenStream ts = analyzer.tokenStream("foo", s);
- ts.reset();
- OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
- while (ts.incrementToken()) {
- String highlightedText = s.substring(offsetAtt.startOffset(), offsetAtt.endOffset());
- for (int j = 0, cp = 0; j < highlightedText.length(); j += Character.charCount(cp)) {
- cp = highlightedText.codePointAt(j);
- assertTrue("non-letter:" + Integer.toHexString(cp), Character.isLetter(cp));
+ try {
+ ts.reset();
+ OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
+ while (ts.incrementToken()) {
+ String highlightedText = s.substring(offsetAtt.startOffset(), offsetAtt.endOffset());
+ for (int j = 0, cp = 0; j < highlightedText.length(); j += Character.charCount(cp)) {
+ cp = highlightedText.codePointAt(j);
+ assertTrue("non-letter:" + Integer.toHexString(cp), Character.isLetter(cp));
+ }
}
+ ts.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(ts);
}
- ts.end();
- ts.close();
}
// just for fun
checkRandomData(random(), analyzer, num);
@@ -162,17 +166,20 @@ public class TestCharTokenizers extends
for (int i = 0; i < num; i++) {
String s = _TestUtil.randomUnicodeString(random());
TokenStream ts = analyzer.tokenStream("foo", s);
- ts.reset();
- OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
- while (ts.incrementToken()) {
- String highlightedText = s.substring(offsetAtt.startOffset(), offsetAtt.endOffset());
- for (int j = 0, cp = 0; j < highlightedText.length(); j += Character.charCount(cp)) {
- cp = highlightedText.codePointAt(j);
- assertTrue("non-letter:" + Integer.toHexString(cp), Character.isLetter(cp));
+ try {
+ ts.reset();
+ OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
+ while (ts.incrementToken()) {
+ String highlightedText = s.substring(offsetAtt.startOffset(), offsetAtt.endOffset());
+ for (int j = 0, cp = 0; j < highlightedText.length(); j += Character.charCount(cp)) {
+ cp = highlightedText.codePointAt(j);
+ assertTrue("non-letter:" + Integer.toHexString(cp), Character.isLetter(cp));
+ }
}
+ ts.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(ts);
}
- ts.end();
- ts.close();
}
// just for fun
checkRandomData(random(), analyzer, num);
Modified: lucene/dev/branches/branch_4x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java?rev=1529780&r1=1529779&r2=1529780&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java Mon Oct 7 07:54:32 2013
@@ -24,6 +24,7 @@ import org.apache.lucene.analysis.TokenS
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.icu.ICUNormalizer2Filter;
import org.apache.lucene.analysis.icu.tokenattributes.ScriptAttribute;
+import org.apache.lucene.util.IOUtils;
import com.ibm.icu.lang.UScript;
@@ -250,15 +251,18 @@ public class TestICUTokenizer extends Ba
public void testTokenAttributes() throws Exception {
TokenStream ts = a.tokenStream("dummy", "This is a test");
- ScriptAttribute scriptAtt = ts.addAttribute(ScriptAttribute.class);
- ts.reset();
- while (ts.incrementToken()) {
- assertEquals(UScript.LATIN, scriptAtt.getCode());
- assertEquals(UScript.getName(UScript.LATIN), scriptAtt.getName());
- assertEquals(UScript.getShortName(UScript.LATIN), scriptAtt.getShortName());
- assertTrue(ts.reflectAsString(false).contains("script=Latin"));
+ try {
+ ScriptAttribute scriptAtt = ts.addAttribute(ScriptAttribute.class);
+ ts.reset();
+ while (ts.incrementToken()) {
+ assertEquals(UScript.LATIN, scriptAtt.getCode());
+ assertEquals(UScript.getName(UScript.LATIN), scriptAtt.getName());
+ assertEquals(UScript.getShortName(UScript.LATIN), scriptAtt.getShortName());
+ assertTrue(ts.reflectAsString(false).contains("script=Latin"));
+ }
+ ts.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(ts);
}
- ts.end();
- ts.close();
}
}
Modified: lucene/dev/branches/branch_4x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestExtendedMode.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestExtendedMode.java?rev=1529780&r1=1529779&r2=1529780&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestExtendedMode.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestExtendedMode.java Mon Oct 7 07:54:32 2013
@@ -27,6 +27,7 @@ import org.apache.lucene.analysis.TokenS
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.ja.JapaneseTokenizer.Mode;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.LuceneTestCase.Slow;
@@ -54,13 +55,16 @@ public class TestExtendedMode extends Ba
for (int i = 0; i < numIterations; i++) {
String s = _TestUtil.randomUnicodeString(random(), 100);
TokenStream ts = analyzer.tokenStream("foo", s);
- CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
- ts.reset();
- while (ts.incrementToken()) {
- assertTrue(UnicodeUtil.validUTF16String(termAtt));
+ try {
+ CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
+ ts.reset();
+ while (ts.incrementToken()) {
+ assertTrue(UnicodeUtil.validUTF16String(termAtt));
+ }
+ ts.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(ts);
}
- ts.end();
- ts.close();
}
}
Modified: lucene/dev/branches/branch_4x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseTokenizer.java?rev=1529780&r1=1529779&r2=1529780&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseTokenizer.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseTokenizer.java Mon Oct 7 07:54:32 2013
@@ -142,12 +142,15 @@ public class TestJapaneseTokenizer exten
*/
public void testDecomposition5() throws Exception {
TokenStream ts = analyzer.tokenStream("bogus", "ãããããããããããããããããããããããããããããããããããããããã");
- ts.reset();
- while (ts.incrementToken()) {
+ try {
+ ts.reset();
+ while (ts.incrementToken()) {
+ }
+ ts.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(ts);
}
- ts.end();
- ts.close();
}
/*
@@ -214,11 +217,14 @@ public class TestJapaneseTokenizer exten
for (int i = 0; i < 100; i++) {
String s = _TestUtil.randomUnicodeString(random(), 10000);
TokenStream ts = analyzer.tokenStream("foo", s);
- ts.reset();
- while (ts.incrementToken()) {
+ try {
+ ts.reset();
+ while (ts.incrementToken()) {
+ }
+ ts.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(ts);
}
- ts.end();
- ts.close();
}
}
@@ -237,28 +243,39 @@ public class TestJapaneseTokenizer exten
}
String s = _TestUtil.randomUnicodeString(random(), 100);
TokenStream ts = analyzer.tokenStream("foo", s);
- CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
- ts.reset();
- while (ts.incrementToken()) {
- assertTrue(UnicodeUtil.validUTF16String(termAtt));
+ try {
+ CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
+ ts.reset();
+ while (ts.incrementToken()) {
+ assertTrue(UnicodeUtil.validUTF16String(termAtt));
+ }
+ ts.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(ts);
}
- ts.end();
- ts.close();
}
}
public void testOnlyPunctuation() throws IOException {
TokenStream ts = analyzerNoPunct.tokenStream("foo", "ãããã");
- ts.reset();
- assertFalse(ts.incrementToken());
- ts.end();
+ try {
+ ts.reset();
+ assertFalse(ts.incrementToken());
+ ts.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(ts);
+ }
}
public void testOnlyPunctuationExtended() throws IOException {
TokenStream ts = extendedModeAnalyzerNoPunct.tokenStream("foo", "......");
- ts.reset();
- assertFalse(ts.incrementToken());
- ts.end();
+ try {
+ ts.reset();
+ assertFalse(ts.incrementToken());
+ ts.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(ts);
+ }
}
// note: test is kinda silly since kuromoji emits punctuation tokens.
@@ -370,74 +387,98 @@ public class TestJapaneseTokenizer exten
private void assertReadings(String input, String... readings) throws IOException {
TokenStream ts = analyzer.tokenStream("ignored", input);
- ReadingAttribute readingAtt = ts.addAttribute(ReadingAttribute.class);
- ts.reset();
- for(String reading : readings) {
- assertTrue(ts.incrementToken());
- assertEquals(reading, readingAtt.getReading());
+ try {
+ ReadingAttribute readingAtt = ts.addAttribute(ReadingAttribute.class);
+ ts.reset();
+ for(String reading : readings) {
+ assertTrue(ts.incrementToken());
+ assertEquals(reading, readingAtt.getReading());
+ }
+ assertFalse(ts.incrementToken());
+ ts.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(ts);
}
- assertFalse(ts.incrementToken());
- ts.end();
}
private void assertPronunciations(String input, String... pronunciations) throws IOException {
TokenStream ts = analyzer.tokenStream("ignored", input);
- ReadingAttribute readingAtt = ts.addAttribute(ReadingAttribute.class);
- ts.reset();
- for(String pronunciation : pronunciations) {
- assertTrue(ts.incrementToken());
- assertEquals(pronunciation, readingAtt.getPronunciation());
+ try {
+ ReadingAttribute readingAtt = ts.addAttribute(ReadingAttribute.class);
+ ts.reset();
+ for(String pronunciation : pronunciations) {
+ assertTrue(ts.incrementToken());
+ assertEquals(pronunciation, readingAtt.getPronunciation());
+ }
+ assertFalse(ts.incrementToken());
+ ts.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(ts);
}
- assertFalse(ts.incrementToken());
- ts.end();
}
private void assertBaseForms(String input, String... baseForms) throws IOException {
TokenStream ts = analyzer.tokenStream("ignored", input);
- BaseFormAttribute baseFormAtt = ts.addAttribute(BaseFormAttribute.class);
- ts.reset();
- for(String baseForm : baseForms) {
- assertTrue(ts.incrementToken());
- assertEquals(baseForm, baseFormAtt.getBaseForm());
+ try {
+ BaseFormAttribute baseFormAtt = ts.addAttribute(BaseFormAttribute.class);
+ ts.reset();
+ for(String baseForm : baseForms) {
+ assertTrue(ts.incrementToken());
+ assertEquals(baseForm, baseFormAtt.getBaseForm());
+ }
+ assertFalse(ts.incrementToken());
+ ts.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(ts);
}
- assertFalse(ts.incrementToken());
- ts.end();
}
private void assertInflectionTypes(String input, String... inflectionTypes) throws IOException {
TokenStream ts = analyzer.tokenStream("ignored", input);
- InflectionAttribute inflectionAtt = ts.addAttribute(InflectionAttribute.class);
- ts.reset();
- for(String inflectionType : inflectionTypes) {
- assertTrue(ts.incrementToken());
- assertEquals(inflectionType, inflectionAtt.getInflectionType());
+ try {
+ InflectionAttribute inflectionAtt = ts.addAttribute(InflectionAttribute.class);
+ ts.reset();
+ for(String inflectionType : inflectionTypes) {
+ assertTrue(ts.incrementToken());
+ assertEquals(inflectionType, inflectionAtt.getInflectionType());
+ }
+ assertFalse(ts.incrementToken());
+ ts.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(ts);
}
- assertFalse(ts.incrementToken());
- ts.end();
}
private void assertInflectionForms(String input, String... inflectionForms) throws IOException {
TokenStream ts = analyzer.tokenStream("ignored", input);
- InflectionAttribute inflectionAtt = ts.addAttribute(InflectionAttribute.class);
- ts.reset();
- for(String inflectionForm : inflectionForms) {
- assertTrue(ts.incrementToken());
- assertEquals(inflectionForm, inflectionAtt.getInflectionForm());
+ try {
+ InflectionAttribute inflectionAtt = ts.addAttribute(InflectionAttribute.class);
+ ts.reset();
+ for(String inflectionForm : inflectionForms) {
+ assertTrue(ts.incrementToken());
+ assertEquals(inflectionForm, inflectionAtt.getInflectionForm());
+ }
+ assertFalse(ts.incrementToken());
+ ts.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(ts);
}
- assertFalse(ts.incrementToken());
- ts.end();
}
private void assertPartsOfSpeech(String input, String... partsOfSpeech) throws IOException {
TokenStream ts = analyzer.tokenStream("ignored", input);
- PartOfSpeechAttribute partOfSpeechAtt = ts.addAttribute(PartOfSpeechAttribute.class);
- ts.reset();
- for(String partOfSpeech : partsOfSpeech) {
- assertTrue(ts.incrementToken());
- assertEquals(partOfSpeech, partOfSpeechAtt.getPartOfSpeech());
+ try {
+ PartOfSpeechAttribute partOfSpeechAtt = ts.addAttribute(PartOfSpeechAttribute.class);
+ ts.reset();
+ for(String partOfSpeech : partsOfSpeech) {
+ assertTrue(ts.incrementToken());
+ assertEquals(partOfSpeech, partOfSpeechAtt.getPartOfSpeech());
+ }
+ assertFalse(ts.incrementToken());
+ ts.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(ts);
}
- assertFalse(ts.incrementToken());
- ts.end();
}
public void testReadings() throws Exception {
@@ -631,11 +672,14 @@ public class TestJapaneseTokenizer exten
long totalStart = System.currentTimeMillis();
for (int i = 0; i < numIterations; i++) {
- final TokenStream ts = analyzer.tokenStream("ignored", line);
- ts.reset();
- while(ts.incrementToken());
- ts.end();
- ts.close();
+ TokenStream ts = analyzer.tokenStream("ignored", line);
+ try {
+ ts.reset();
+ while(ts.incrementToken());
+ ts.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(ts);
+ }
}
String[] sentences = line.split("。|、");
if (VERBOSE) {
@@ -645,11 +689,14 @@ public class TestJapaneseTokenizer exten
totalStart = System.currentTimeMillis();
for (int i = 0; i < numIterations; i++) {
for (String sentence: sentences) {
- final TokenStream ts = analyzer.tokenStream("ignored", sentence);
- ts.reset();
- while(ts.incrementToken());
- ts.end();
- ts.close();
+ TokenStream ts = analyzer.tokenStream("ignored", sentence);
+ try {
+ ts.reset();
+ while(ts.incrementToken());
+ ts.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(ts);
+ }
}
}
if (VERBOSE) {
Modified: lucene/dev/branches/branch_4x/lucene/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikAnalyzer.java?rev=1529780&r1=1529779&r2=1529780&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikAnalyzer.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikAnalyzer.java Mon Oct 7 07:54:32 2013
@@ -30,6 +30,7 @@ import org.apache.lucene.analysis.standa
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.util.CharArraySet;
+import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.Version;
/**
@@ -73,12 +74,17 @@ public class TestMorfologikAnalyzer exte
@SuppressWarnings("unused")
private void dumpTokens(String input) throws IOException {
TokenStream ts = getTestAnalyzer().tokenStream("dummy", input);
- ts.reset();
+ try {
+ ts.reset();
- MorphosyntacticTagsAttribute attribute = ts.getAttribute(MorphosyntacticTagsAttribute.class);
- CharTermAttribute charTerm = ts.getAttribute(CharTermAttribute.class);
- while (ts.incrementToken()) {
- System.out.println(charTerm.toString() + " => " + attribute.getTags());
+ MorphosyntacticTagsAttribute attribute = ts.getAttribute(MorphosyntacticTagsAttribute.class);
+ CharTermAttribute charTerm = ts.getAttribute(CharTermAttribute.class);
+ while (ts.incrementToken()) {
+ System.out.println(charTerm.toString() + " => " + attribute.getTags());
+ }
+ ts.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(ts);
}
}
@@ -86,20 +92,26 @@ public class TestMorfologikAnalyzer exte
public final void testLeftoverStems() throws IOException {
Analyzer a = getTestAnalyzer();
TokenStream ts_1 = a.tokenStream("dummy", "liście");
- CharTermAttribute termAtt_1 = ts_1.getAttribute(CharTermAttribute.class);
- ts_1.reset();
- ts_1.incrementToken();
- assertEquals("first stream", "liście", termAtt_1.toString());
- ts_1.end();
- ts_1.close();
+ try {
+ CharTermAttribute termAtt_1 = ts_1.getAttribute(CharTermAttribute.class);
+ ts_1.reset();
+ ts_1.incrementToken();
+ assertEquals("first stream", "liście", termAtt_1.toString());
+ ts_1.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(ts_1);
+ }
TokenStream ts_2 = a.tokenStream("dummy", "danych");
- CharTermAttribute termAtt_2 = ts_2.getAttribute(CharTermAttribute.class);
- ts_2.reset();
- ts_2.incrementToken();
- assertEquals("second stream", "dany", termAtt_2.toString());
- ts_2.end();
- ts_2.close();
+ try {
+ CharTermAttribute termAtt_2 = ts_2.getAttribute(CharTermAttribute.class);
+ ts_2.reset();
+ ts_2.incrementToken();
+ assertEquals("second stream", "dany", termAtt_2.toString());
+ ts_2.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(ts_2);
+ }
}
/** Test stemming of mixed-case tokens. */
@@ -141,27 +153,29 @@ public class TestMorfologikAnalyzer exte
/** Test morphosyntactic annotations. */
public final void testPOSAttribute() throws IOException {
TokenStream ts = getTestAnalyzer().tokenStream("dummy", "liście");
-
- ts.reset();
- assertPOSToken(ts, "liście",
+ try {
+ ts.reset();
+ assertPOSToken(ts, "liście",
"subst:sg:acc:n2",
"subst:sg:nom:n2",
"subst:sg:voc:n2");
- assertPOSToken(ts, "liść",
+ assertPOSToken(ts, "liść",
"subst:pl:acc:m3",
"subst:pl:nom:m3",
"subst:pl:voc:m3");
- assertPOSToken(ts, "list",
+ assertPOSToken(ts, "list",
"subst:sg:loc:m3",
"subst:sg:voc:m3");
- assertPOSToken(ts, "lista",
+ assertPOSToken(ts, "lista",
"subst:sg:dat:f",
"subst:sg:loc:f");
- ts.end();
- ts.close();
+ ts.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(ts);
+ }
}
/** */
Modified: lucene/dev/branches/branch_4x/lucene/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java?rev=1529780&r1=1529779&r2=1529780&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java (original)
+++ lucene/dev/branches/branch_4x/lucene/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java Mon Oct 7 07:54:32 2013
@@ -29,6 +29,7 @@ import org.apache.lucene.analysis.TokenS
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter;
+import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.Version;
public class TestSmartChineseAnalyzer extends BaseTokenStreamTestCase {
@@ -185,8 +186,13 @@ public class TestSmartChineseAnalyzer ex
}
Analyzer analyzer = new SmartChineseAnalyzer(TEST_VERSION_CURRENT);
TokenStream stream = analyzer.tokenStream("", sb.toString());
- stream.reset();
- while (stream.incrementToken()) {
+ try {
+ stream.reset();
+ while (stream.incrementToken()) {
+ }
+ stream.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(stream);
}
}
@@ -198,8 +204,13 @@ public class TestSmartChineseAnalyzer ex
}
Analyzer analyzer = new SmartChineseAnalyzer(TEST_VERSION_CURRENT);
TokenStream stream = analyzer.tokenStream("", sb.toString());
- stream.reset();
- while (stream.incrementToken()) {
+ try {
+ stream.reset();
+ while (stream.incrementToken()) {
+ }
+ stream.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(stream);
}
}
Modified: lucene/dev/branches/branch_4x/lucene/classification/src/java/org/apache/lucene/classification/SimpleNaiveBayesClassifier.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/classification/src/java/org/apache/lucene/classification/SimpleNaiveBayesClassifier.java?rev=1529780&r1=1529779&r2=1529780&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/classification/src/java/org/apache/lucene/classification/SimpleNaiveBayesClassifier.java (original)
+++ lucene/dev/branches/branch_4x/lucene/classification/src/java/org/apache/lucene/classification/SimpleNaiveBayesClassifier.java Mon Oct 7 07:54:32 2013
@@ -31,6 +31,7 @@ import org.apache.lucene.search.TermQuer
import org.apache.lucene.search.TotalHitCountCollector;
import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
import java.io.IOException;
import java.util.Collection;
@@ -86,13 +87,16 @@ public class SimpleNaiveBayesClassifier
private String[] tokenizeDoc(String doc) throws IOException {
Collection<String> result = new LinkedList<String>();
TokenStream tokenStream = analyzer.tokenStream(textFieldName, doc);
- CharTermAttribute charTermAttribute = tokenStream.addAttribute(CharTermAttribute.class);
- tokenStream.reset();
- while (tokenStream.incrementToken()) {
- result.add(charTermAttribute.toString());
+ try {
+ CharTermAttribute charTermAttribute = tokenStream.addAttribute(CharTermAttribute.class);
+ tokenStream.reset();
+ while (tokenStream.incrementToken()) {
+ result.add(charTermAttribute.toString());
+ }
+ tokenStream.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(tokenStream);
}
- tokenStream.end();
- tokenStream.close();
return result.toArray(new String[result.size()]);
}
Modified: lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java?rev=1529780&r1=1529779&r2=1529780&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java Mon Oct 7 07:54:32 2013
@@ -5,6 +5,7 @@ import java.io.StringReader;
import java.util.Arrays;
import java.util.Random;
+import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.BasicAutomata;
@@ -98,13 +99,19 @@ public class TestMockAnalyzer extends Ba
String testString = "t";
Analyzer analyzer = new MockAnalyzer(random());
+ Exception priorException = null;
TokenStream stream = analyzer.tokenStream("dummy", testString);
- stream.reset();
- while (stream.incrementToken()) {
- // consume
+ try {
+ stream.reset();
+ while (stream.incrementToken()) {
+ // consume
+ }
+ stream.end();
+ } catch (Exception e) {
+ priorException = e;
+ } finally {
+ IOUtils.closeWhileHandlingException(priorException, stream);
}
- stream.end();
- stream.close();
assertAnalyzesTo(analyzer, testString, new String[] { "t" });
}
@@ -121,13 +128,19 @@ public class TestMockAnalyzer extends Ba
StringReader reader = new StringReader(s);
MockCharFilter charfilter = new MockCharFilter(reader, 2);
MockAnalyzer analyzer = new MockAnalyzer(random());
+ Exception priorException = null;
TokenStream ts = analyzer.tokenStream("bogus", charfilter);
- ts.reset();
- while (ts.incrementToken()) {
- ;
+ try {
+ ts.reset();
+ while (ts.incrementToken()) {
+ ;
+ }
+ ts.end();
+ } catch (Exception e) {
+ priorException = e;
+ } finally {
+ IOUtils.closeWhileHandlingException(priorException, ts);
}
- ts.end();
- ts.close();
}
}
Modified: lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java?rev=1529780&r1=1529779&r2=1529780&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java Mon Oct 7 07:54:32 2013
@@ -30,6 +30,7 @@ import org.apache.lucene.document.TextFi
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
import org.apache.lucene.util.FixedBitSet;
@@ -47,30 +48,35 @@ public class TestLongPostings extends Lu
if (other != null && s.equals(other)) {
continue;
}
- final TokenStream ts = a.tokenStream("foo", s);
- final TermToBytesRefAttribute termAtt = ts.getAttribute(TermToBytesRefAttribute.class);
- final BytesRef termBytes = termAtt.getBytesRef();
- ts.reset();
-
- int count = 0;
- boolean changed = false;
-
- while(ts.incrementToken()) {
- termAtt.fillBytesRef();
- if (count == 0 && !termBytes.utf8ToString().equals(s)) {
- // The value was changed during analysis. Keep iterating so the
- // tokenStream is exhausted.
- changed = true;
+ IOException priorException = null;
+ TokenStream ts = a.tokenStream("foo", s);
+ try {
+ final TermToBytesRefAttribute termAtt = ts.getAttribute(TermToBytesRefAttribute.class);
+ final BytesRef termBytes = termAtt.getBytesRef();
+ ts.reset();
+
+ int count = 0;
+ boolean changed = false;
+
+ while(ts.incrementToken()) {
+ termAtt.fillBytesRef();
+ if (count == 0 && !termBytes.utf8ToString().equals(s)) {
+ // The value was changed during analysis. Keep iterating so the
+ // tokenStream is exhausted.
+ changed = true;
+ }
+ count++;
}
- count++;
- }
- ts.end();
- ts.close();
-
- // Did we iterate just once and the value was unchanged?
- if (!changed && count == 1) {
- return s;
+ ts.end();
+ // Did we iterate just once and the value was unchanged?
+ if (!changed && count == 1) {
+ return s;
+ }
+ } catch (IOException e) {
+ priorException = e;
+ } finally {
+ IOUtils.closeWhileHandlingException(priorException, ts);
}
}
}
Modified: lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java?rev=1529780&r1=1529779&r2=1529780&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java Mon Oct 7 07:54:32 2013
@@ -34,6 +34,7 @@ import org.apache.lucene.store.Directory
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LuceneTestCase;
/** tests for writing term vectors */
@@ -174,17 +175,24 @@ public class TestTermVectorsWriter exten
Analyzer analyzer = new MockAnalyzer(random());
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer));
Document doc = new Document();
+ IOException priorException = null;
TokenStream stream = analyzer.tokenStream("field", "abcd ");
- stream.reset(); // TODO: weird to reset before wrapping with CachingTokenFilter... correct?
- stream = new CachingTokenFilter(stream);
- FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
- customType.setStoreTermVectors(true);
- customType.setStoreTermVectorPositions(true);
- customType.setStoreTermVectorOffsets(true);
- Field f = new Field("field", stream, customType);
- doc.add(f);
- doc.add(f);
- w.addDocument(doc);
+ try {
+ stream.reset(); // TODO: weird to reset before wrapping with CachingTokenFilter... correct?
+ TokenStream cachedStream = new CachingTokenFilter(stream);
+ FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
+ customType.setStoreTermVectors(true);
+ customType.setStoreTermVectorPositions(true);
+ customType.setStoreTermVectorOffsets(true);
+ Field f = new Field("field", cachedStream, customType);
+ doc.add(f);
+ doc.add(f);
+ w.addDocument(doc);
+ } catch (IOException e) {
+ priorException = e;
+ } finally {
+ IOUtils.closeWhileHandlingException(priorException, stream);
+ }
w.close();
IndexReader r = DirectoryReader.open(dir);
Modified: lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java?rev=1529780&r1=1529779&r2=1529780&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java (original)
+++ lucene/dev/branches/branch_4x/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java Mon Oct 7 07:54:32 2013
@@ -624,16 +624,22 @@ public class TestPhraseQuery extends Luc
break;
}
}
+ IOException priorException = null;
TokenStream ts = analyzer.tokenStream("ignore", term);
- CharTermAttribute termAttr = ts.addAttribute(CharTermAttribute.class);
- ts.reset();
- while(ts.incrementToken()) {
- String text = termAttr.toString();
- doc.add(text);
- sb.append(text).append(' ');
+ try {
+ CharTermAttribute termAttr = ts.addAttribute(CharTermAttribute.class);
+ ts.reset();
+ while(ts.incrementToken()) {
+ String text = termAttr.toString();
+ doc.add(text);
+ sb.append(text).append(' ');
+ }
+ ts.end();
+ } catch (IOException e) {
+ priorException = e;
+ } finally {
+ IOUtils.closeWhileHandlingException(priorException, ts);
}
- ts.end();
- ts.close();
} else {
// pick existing sub-phrase
List<String> lastDoc = docs.get(r.nextInt(docs.size()));
Modified: lucene/dev/branches/branch_4x/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java?rev=1529780&r1=1529779&r2=1529780&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java (original)
+++ lucene/dev/branches/branch_4x/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java Mon Oct 7 07:54:32 2013
@@ -43,6 +43,7 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LuceneTestCase;
public abstract class AbstractTestCase extends LuceneTestCase {
@@ -171,19 +172,22 @@ public abstract class AbstractTestCase e
List<BytesRef> bytesRefs = new ArrayList<BytesRef>();
TokenStream tokenStream = analyzer.tokenStream(field, text);
- TermToBytesRefAttribute termAttribute = tokenStream.getAttribute(TermToBytesRefAttribute.class);
+ try {
+ TermToBytesRefAttribute termAttribute = tokenStream.getAttribute(TermToBytesRefAttribute.class);
- BytesRef bytesRef = termAttribute.getBytesRef();
+ BytesRef bytesRef = termAttribute.getBytesRef();
- tokenStream.reset();
+ tokenStream.reset();
- while (tokenStream.incrementToken()) {
- termAttribute.fillBytesRef();
- bytesRefs.add(BytesRef.deepCopyOf(bytesRef));
- }
+ while (tokenStream.incrementToken()) {
+ termAttribute.fillBytesRef();
+ bytesRefs.add(BytesRef.deepCopyOf(bytesRef));
+ }
- tokenStream.end();
- tokenStream.close();
+ tokenStream.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(tokenStream);
+ }
return bytesRefs;
}
Modified: lucene/dev/branches/branch_4x/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java?rev=1529780&r1=1529779&r2=1529780&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java (original)
+++ lucene/dev/branches/branch_4x/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java Mon Oct 7 07:54:32 2013
@@ -34,6 +34,7 @@ import org.apache.lucene.search.similari
import org.apache.lucene.search.similarities.TFIDFSimilarity;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.PriorityQueue;
import org.apache.lucene.util.UnicodeUtil;
@@ -776,30 +777,33 @@ public final class MoreLikeThis {
"term vectors, you must provide an Analyzer");
}
TokenStream ts = analyzer.tokenStream(fieldName, r);
- int tokenCount = 0;
- // for every token
- CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
- ts.reset();
- while (ts.incrementToken()) {
- String word = termAtt.toString();
- tokenCount++;
- if (tokenCount > maxNumTokensParsed) {
- break;
- }
- if (isNoiseWord(word)) {
- continue;
- }
+ try {
+ int tokenCount = 0;
+ // for every token
+ CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
+ ts.reset();
+ while (ts.incrementToken()) {
+ String word = termAtt.toString();
+ tokenCount++;
+ if (tokenCount > maxNumTokensParsed) {
+ break;
+ }
+ if (isNoiseWord(word)) {
+ continue;
+ }
- // increment frequency
- Int cnt = termFreqMap.get(word);
- if (cnt == null) {
- termFreqMap.put(word, new Int());
- } else {
- cnt.x++;
+ // increment frequency
+ Int cnt = termFreqMap.get(word);
+ if (cnt == null) {
+ termFreqMap.put(word, new Int());
+ } else {
+ cnt.x++;
+ }
}
+ ts.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(ts);
}
- ts.end();
- ts.close();
}
Modified: lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/analyzing/AnalyzingQueryParser.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/analyzing/AnalyzingQueryParser.java?rev=1529780&r1=1529779&r2=1529780&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/analyzing/AnalyzingQueryParser.java (original)
+++ lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/analyzing/AnalyzingQueryParser.java Mon Oct 7 07:54:32 2013
@@ -26,6 +26,7 @@ import org.apache.lucene.analysis.TokenS
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.search.Query;
+import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.Version;
/**
@@ -163,7 +164,7 @@ public class AnalyzingQueryParser extend
protected String analyzeSingleChunk(String field, String termStr, String chunk) throws ParseException{
String analyzed = null;
TokenStream stream = null;
- try{
+ try {
stream = getAnalyzer().tokenStream(field, chunk);
stream.reset();
CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
@@ -186,7 +187,6 @@ public class AnalyzingQueryParser extend
multipleOutputs.append('"');
}
stream.end();
- stream.close();
if (null != multipleOutputs) {
throw new ParseException(
String.format(getLocale(),
@@ -196,12 +196,13 @@ public class AnalyzingQueryParser extend
// nothing returned by analyzer. Was it a stop word and the user accidentally
// used an analyzer with stop words?
stream.end();
- stream.close();
throw new ParseException(String.format(getLocale(), "Analyzer returned nothing for \"%s\"", chunk));
}
} catch (IOException e){
throw new ParseException(
String.format(getLocale(), "IO error while trying to analyze single term: \"%s\"", termStr));
+ } finally {
+ IOUtils.closeWhileHandlingException(stream);
}
return analyzed;
}
Modified: lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserBase.java?rev=1529780&r1=1529779&r2=1529780&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserBase.java (original)
+++ lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserBase.java Mon Oct 7 07:54:32 2013
@@ -34,6 +34,7 @@ import org.apache.lucene.queryparser.fle
import org.apache.lucene.search.*;
import org.apache.lucene.search.BooleanQuery.TooManyClauses;
import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.Version;
/** This class is overridden by QueryParser in QueryParser.jj
@@ -501,63 +502,55 @@ public abstract class QueryParserBase im
protected Query newFieldQuery(Analyzer analyzer, String field, String queryText, boolean quoted) throws ParseException {
// Use the analyzer to get all the tokens, and then build a TermQuery,
// PhraseQuery, or nothing based on the term count
-
- TokenStream source;
- try {
- source = analyzer.tokenStream(field, queryText);
- source.reset();
- } catch (IOException e) {
- ParseException p = new ParseException("Unable to initialize TokenStream to analyze query text");
- p.initCause(e);
- throw p;
- }
- CachingTokenFilter buffer = new CachingTokenFilter(source);
+ CachingTokenFilter buffer = null;
TermToBytesRefAttribute termAtt = null;
PositionIncrementAttribute posIncrAtt = null;
int numTokens = 0;
-
- buffer.reset();
-
- if (buffer.hasAttribute(TermToBytesRefAttribute.class)) {
- termAtt = buffer.getAttribute(TermToBytesRefAttribute.class);
- }
- if (buffer.hasAttribute(PositionIncrementAttribute.class)) {
- posIncrAtt = buffer.getAttribute(PositionIncrementAttribute.class);
- }
-
int positionCount = 0;
boolean severalTokensAtSamePosition = false;
+ boolean hasMoreTokens = false;
+
+ TokenStream source = null;
+ try {
+ source = analyzer.tokenStream(field, queryText);
+ source.reset();
+ buffer = new CachingTokenFilter(source);
+ buffer.reset();
- boolean hasMoreTokens = false;
- if (termAtt != null) {
- try {
- hasMoreTokens = buffer.incrementToken();
- while (hasMoreTokens) {
- numTokens++;
- int positionIncrement = (posIncrAtt != null) ? posIncrAtt.getPositionIncrement() : 1;
- if (positionIncrement != 0) {
- positionCount += positionIncrement;
- } else {
- severalTokensAtSamePosition = true;
- }
+ if (buffer.hasAttribute(TermToBytesRefAttribute.class)) {
+ termAtt = buffer.getAttribute(TermToBytesRefAttribute.class);
+ }
+ if (buffer.hasAttribute(PositionIncrementAttribute.class)) {
+ posIncrAtt = buffer.getAttribute(PositionIncrementAttribute.class);
+ }
+
+ if (termAtt != null) {
+ try {
hasMoreTokens = buffer.incrementToken();
+ while (hasMoreTokens) {
+ numTokens++;
+ int positionIncrement = (posIncrAtt != null) ? posIncrAtt.getPositionIncrement() : 1;
+ if (positionIncrement != 0) {
+ positionCount += positionIncrement;
+ } else {
+ severalTokensAtSamePosition = true;
+ }
+ hasMoreTokens = buffer.incrementToken();
+ }
+ } catch (IOException e) {
+ // ignore
}
- } catch (IOException e) {
- // ignore
}
- }
- try {
- // rewind the buffer stream
- buffer.reset();
-
- // close original stream - all tokens buffered
- source.close();
- }
- catch (IOException e) {
- ParseException p = new ParseException("Cannot close TokenStream analyzing query text");
+ } catch (IOException e) {
+ ParseException p = new ParseException("Error analyzing query text");
p.initCause(e);
throw p;
+ } finally {
+ IOUtils.closeWhileHandlingException(source);
}
+
+ // rewind the buffer stream
+ buffer.reset();
BytesRef bytes = termAtt == null ? null : termAtt.getBytesRef();
@@ -843,38 +836,28 @@ public abstract class QueryParserBase im
}
protected BytesRef analyzeMultitermTerm(String field, String part, Analyzer analyzerIn) {
- TokenStream source;
-
if (analyzerIn == null) analyzerIn = analyzer;
+ TokenStream source = null;
try {
source = analyzerIn.tokenStream(field, part);
source.reset();
- } catch (IOException e) {
- throw new RuntimeException("Unable to initialize TokenStream to analyze multiTerm term: " + part, e);
- }
- TermToBytesRefAttribute termAtt = source.getAttribute(TermToBytesRefAttribute.class);
- BytesRef bytes = termAtt.getBytesRef();
+ TermToBytesRefAttribute termAtt = source.getAttribute(TermToBytesRefAttribute.class);
+ BytesRef bytes = termAtt.getBytesRef();
- try {
if (!source.incrementToken())
throw new IllegalArgumentException("analyzer returned no terms for multiTerm term: " + part);
termAtt.fillBytesRef();
if (source.incrementToken())
throw new IllegalArgumentException("analyzer returned too many terms for multiTerm term: " + part);
- } catch (IOException e) {
- throw new RuntimeException("error analyzing range part: " + part, e);
- }
-
- try {
source.end();
- source.close();
+ return BytesRef.deepCopyOf(bytes);
} catch (IOException e) {
- throw new RuntimeException("Unable to end & close TokenStream after analyzing multiTerm term: " + part, e);
+ throw new RuntimeException("Error analyzing multiTerm term: " + part, e);
+ } finally {
+ IOUtils.closeWhileHandlingException(source);
}
-
- return BytesRef.deepCopyOf(bytes);
}
/**
Modified: lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AnalyzerQueryNodeProcessor.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AnalyzerQueryNodeProcessor.java?rev=1529780&r1=1529779&r2=1529780&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AnalyzerQueryNodeProcessor.java (original)
+++ lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AnalyzerQueryNodeProcessor.java Mon Oct 7 07:54:32 2013
@@ -44,6 +44,7 @@ import org.apache.lucene.queryparser.fle
import org.apache.lucene.queryparser.flexible.standard.nodes.RegexpQueryNode;
import org.apache.lucene.queryparser.flexible.standard.nodes.StandardBooleanQueryNode;
import org.apache.lucene.queryparser.flexible.standard.nodes.WildcardQueryNode;
+import org.apache.lucene.util.IOUtils;
/**
* This processor verifies if {@link ConfigurationKeys#ANALYZER}
@@ -113,52 +114,48 @@ public class AnalyzerQueryNodeProcessor
String text = fieldNode.getTextAsString();
String field = fieldNode.getFieldAsString();
- TokenStream source;
- try {
- source = this.analyzer.tokenStream(field, text);
- source.reset();
- } catch (IOException e1) {
- throw new RuntimeException(e1);
- }
- CachingTokenFilter buffer = new CachingTokenFilter(source);
-
+ CachingTokenFilter buffer = null;
PositionIncrementAttribute posIncrAtt = null;
int numTokens = 0;
int positionCount = 0;
boolean severalTokensAtSamePosition = false;
-
- if (buffer.hasAttribute(PositionIncrementAttribute.class)) {
- posIncrAtt = buffer.getAttribute(PositionIncrementAttribute.class);
- }
-
+
+ TokenStream source = null;
try {
+ source = this.analyzer.tokenStream(field, text);
+ source.reset();
+ buffer = new CachingTokenFilter(source);
- while (buffer.incrementToken()) {
- numTokens++;
- int positionIncrement = (posIncrAtt != null) ? posIncrAtt
- .getPositionIncrement() : 1;
- if (positionIncrement != 0) {
- positionCount += positionIncrement;
+ if (buffer.hasAttribute(PositionIncrementAttribute.class)) {
+ posIncrAtt = buffer.getAttribute(PositionIncrementAttribute.class);
+ }
- } else {
- severalTokensAtSamePosition = true;
- }
+ try {
- }
+ while (buffer.incrementToken()) {
+ numTokens++;
+ int positionIncrement = (posIncrAtt != null) ? posIncrAtt
+ .getPositionIncrement() : 1;
+ if (positionIncrement != 0) {
+ positionCount += positionIncrement;
- } catch (IOException e) {
- // ignore
- }
+ } else {
+ severalTokensAtSamePosition = true;
+ }
- try {
- // rewind the buffer stream
- buffer.reset();
+ }
- // close original stream - all tokens buffered
- source.close();
+ } catch (IOException e) {
+ // ignore
+ }
} catch (IOException e) {
- // ignore
+ throw new RuntimeException(e);
+ } finally {
+ IOUtils.closeWhileHandlingException(source);
}
+
+ // rewind the buffer stream
+ buffer.reset();
if (!buffer.hasAttribute(CharTermAttribute.class)) {
return new NoTokenFoundQueryNode();
Modified: lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LikeThisQueryBuilder.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LikeThisQueryBuilder.java?rev=1529780&r1=1529779&r2=1529780&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LikeThisQueryBuilder.java (original)
+++ lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LikeThisQueryBuilder.java Mon Oct 7 07:54:32 2013
@@ -13,6 +13,7 @@ import org.apache.lucene.analysis.tokena
import org.apache.lucene.queries.mlt.MoreLikeThisQuery;
import org.apache.lucene.queryparser.xml.QueryBuilder;
import org.apache.lucene.search.Query;
+import org.apache.lucene.util.IOUtils;
import org.apache.lucene.queryparser.xml.DOMUtils;
import org.apache.lucene.queryparser.xml.ParserException;
import org.w3c.dom.Element;
@@ -73,18 +74,20 @@ public class LikeThisQueryBuilder implem
if ((stopWords != null) && (fields != null)) {
stopWordsSet = new HashSet<String>();
for (String field : fields) {
+ TokenStream ts = null;
try {
- TokenStream ts = analyzer.tokenStream(field, stopWords);
+ ts = analyzer.tokenStream(field, stopWords);
CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
ts.reset();
while (ts.incrementToken()) {
stopWordsSet.add(termAtt.toString());
}
ts.end();
- ts.close();
} catch (IOException ioe) {
throw new ParserException("IoException parsing stop words list in "
+ getClass().getName() + ":" + ioe.getLocalizedMessage());
+ } finally {
+ IOUtils.closeWhileHandlingException(ts);
}
}
}
Modified: lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrTermsBuilder.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrTermsBuilder.java?rev=1529780&r1=1529779&r2=1529780&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrTermsBuilder.java (original)
+++ lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrTermsBuilder.java Mon Oct 7 07:54:32 2013
@@ -8,6 +8,7 @@ import org.apache.lucene.search.spans.Sp
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
import org.apache.lucene.queryparser.xml.DOMUtils;
import org.apache.lucene.queryparser.xml.ParserException;
import org.w3c.dom.Element;
@@ -49,9 +50,11 @@ public class SpanOrTermsBuilder extends
String fieldName = DOMUtils.getAttributeWithInheritanceOrFail(e, "fieldName");
String value = DOMUtils.getNonBlankTextOrFail(e);
+ List<SpanQuery> clausesList = new ArrayList<SpanQuery>();
+
+ TokenStream ts = null;
try {
- List<SpanQuery> clausesList = new ArrayList<SpanQuery>();
- TokenStream ts = analyzer.tokenStream(fieldName, value);
+ ts = analyzer.tokenStream(fieldName, value);
TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class);
BytesRef bytes = termAtt.getBytesRef();
ts.reset();
@@ -61,13 +64,14 @@ public class SpanOrTermsBuilder extends
clausesList.add(stq);
}
ts.end();
- ts.close();
SpanOrQuery soq = new SpanOrQuery(clausesList.toArray(new SpanQuery[clausesList.size()]));
soq.setBoost(DOMUtils.getAttribute(e, "boost", 1.0f));
return soq;
}
catch (IOException ioe) {
throw new ParserException("IOException parsing value:" + value);
+ } finally {
+ IOUtils.closeWhileHandlingException(ts);
}
}
Modified: lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsFilterBuilder.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsFilterBuilder.java?rev=1529780&r1=1529779&r2=1529780&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsFilterBuilder.java (original)
+++ lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsFilterBuilder.java Mon Oct 7 07:54:32 2013
@@ -6,6 +6,7 @@ import org.apache.lucene.analysis.tokena
import org.apache.lucene.search.Filter;
import org.apache.lucene.queries.TermsFilter;
import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
import org.apache.lucene.queryparser.xml.DOMUtils;
import org.apache.lucene.queryparser.xml.FilterBuilder;
import org.apache.lucene.queryparser.xml.ParserException;
@@ -54,8 +55,9 @@ public class TermsFilterBuilder implemen
String text = DOMUtils.getNonBlankTextOrFail(e);
String fieldName = DOMUtils.getAttributeWithInheritanceOrFail(e, "fieldName");
+ TokenStream ts = null;
try {
- TokenStream ts = analyzer.tokenStream(fieldName, text);
+ ts = analyzer.tokenStream(fieldName, text);
TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class);
BytesRef bytes = termAtt.getBytesRef();
ts.reset();
@@ -64,10 +66,11 @@ public class TermsFilterBuilder implemen
terms.add(BytesRef.deepCopyOf(bytes));
}
ts.end();
- ts.close();
}
catch (IOException ioe) {
throw new RuntimeException("Error constructing terms from index:" + ioe);
+ } finally {
+ IOUtils.closeWhileHandlingException(ts);
}
return new TermsFilter(fieldName, terms);
}
Modified: lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsQueryBuilder.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsQueryBuilder.java?rev=1529780&r1=1529779&r2=1529780&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsQueryBuilder.java (original)
+++ lucene/dev/branches/branch_4x/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsQueryBuilder.java Mon Oct 7 07:54:32 2013
@@ -9,6 +9,7 @@ import org.apache.lucene.search.BooleanQ
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
import org.apache.lucene.queryparser.xml.DOMUtils;
import org.apache.lucene.queryparser.xml.ParserException;
import org.apache.lucene.queryparser.xml.QueryBuilder;
@@ -51,8 +52,9 @@ public class TermsQueryBuilder implement
BooleanQuery bq = new BooleanQuery(DOMUtils.getAttribute(e, "disableCoord", false));
bq.setMinimumNumberShouldMatch(DOMUtils.getAttribute(e, "minimumNumberShouldMatch", 0));
+ TokenStream ts = null;
try {
- TokenStream ts = analyzer.tokenStream(fieldName, text);
+ ts = analyzer.tokenStream(fieldName, text);
TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class);
Term term = null;
BytesRef bytes = termAtt.getBytesRef();
@@ -63,10 +65,11 @@ public class TermsQueryBuilder implement
bq.add(new BooleanClause(new TermQuery(term), BooleanClause.Occur.SHOULD));
}
ts.end();
- ts.close();
}
catch (IOException ioe) {
throw new RuntimeException("Error constructing terms from index:" + ioe);
+ } finally {
+ IOUtils.closeWhileHandlingException(ts);
}
bq.setBoost(DOMUtils.getAttribute(e, "boost", 1.0f));
Modified: lucene/dev/branches/branch_4x/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_4x/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java?rev=1529780&r1=1529779&r2=1529780&view=diff
==============================================================================
--- lucene/dev/branches/branch_4x/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java (original)
+++ lucene/dev/branches/branch_4x/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java Mon Oct 7 07:54:32 2013
@@ -35,6 +35,7 @@ import org.apache.lucene.search.similari
import org.apache.lucene.search.similarities.DefaultSimilarity;
import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.PriorityQueue;
/**
@@ -193,67 +194,70 @@ public class FuzzyLikeThisQuery extends
private void addTerms(IndexReader reader, FieldVals f) throws IOException {
if (f.queryString == null) return;
- TokenStream ts = analyzer.tokenStream(f.fieldName, f.queryString);
- CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
-
- int corpusNumDocs = reader.numDocs();
- HashSet<String> processedTerms = new HashSet<String>();
- ts.reset();
final Terms terms = MultiFields.getTerms(reader, f.fieldName);
if (terms == null) {
return;
}
- while (ts.incrementToken()) {
- String term = termAtt.toString();
- if (!processedTerms.contains(term)) {
- processedTerms.add(term);
- ScoreTermQueue variantsQ = new ScoreTermQueue(MAX_VARIANTS_PER_TERM); //maxNum variants considered for any one term
- float minScore = 0;
- Term startTerm = new Term(f.fieldName, term);
- AttributeSource atts = new AttributeSource();
- MaxNonCompetitiveBoostAttribute maxBoostAtt =
+ TokenStream ts = analyzer.tokenStream(f.fieldName, f.queryString);
+ try {
+ CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
+
+ int corpusNumDocs = reader.numDocs();
+ HashSet<String> processedTerms = new HashSet<String>();
+ ts.reset();
+ while (ts.incrementToken()) {
+ String term = termAtt.toString();
+ if (!processedTerms.contains(term)) {
+ processedTerms.add(term);
+ ScoreTermQueue variantsQ = new ScoreTermQueue(MAX_VARIANTS_PER_TERM); //maxNum variants considered for any one term
+ float minScore = 0;
+ Term startTerm = new Term(f.fieldName, term);
+ AttributeSource atts = new AttributeSource();
+ MaxNonCompetitiveBoostAttribute maxBoostAtt =
atts.addAttribute(MaxNonCompetitiveBoostAttribute.class);
- SlowFuzzyTermsEnum fe = new SlowFuzzyTermsEnum(terms, atts, startTerm, f.minSimilarity, f.prefixLength);
- //store the df so all variants use same idf
- int df = reader.docFreq(startTerm);
- int numVariants = 0;
- int totalVariantDocFreqs = 0;
- BytesRef possibleMatch;
- BoostAttribute boostAtt =
+ SlowFuzzyTermsEnum fe = new SlowFuzzyTermsEnum(terms, atts, startTerm, f.minSimilarity, f.prefixLength);
+ //store the df so all variants use same idf
+ int df = reader.docFreq(startTerm);
+ int numVariants = 0;
+ int totalVariantDocFreqs = 0;
+ BytesRef possibleMatch;
+ BoostAttribute boostAtt =
fe.attributes().addAttribute(BoostAttribute.class);
- while ((possibleMatch = fe.next()) != null) {
- numVariants++;
- totalVariantDocFreqs += fe.docFreq();
- float score = boostAtt.getBoost();
- if (variantsQ.size() < MAX_VARIANTS_PER_TERM || score > minScore) {
- ScoreTerm st = new ScoreTerm(new Term(startTerm.field(), BytesRef.deepCopyOf(possibleMatch)), score, startTerm);
- variantsQ.insertWithOverflow(st);
- minScore = variantsQ.top().score; // maintain minScore
+ while ((possibleMatch = fe.next()) != null) {
+ numVariants++;
+ totalVariantDocFreqs += fe.docFreq();
+ float score = boostAtt.getBoost();
+ if (variantsQ.size() < MAX_VARIANTS_PER_TERM || score > minScore) {
+ ScoreTerm st = new ScoreTerm(new Term(startTerm.field(), BytesRef.deepCopyOf(possibleMatch)), score, startTerm);
+ variantsQ.insertWithOverflow(st);
+ minScore = variantsQ.top().score; // maintain minScore
+ }
+ maxBoostAtt.setMaxNonCompetitiveBoost(variantsQ.size() >= MAX_VARIANTS_PER_TERM ? minScore : Float.NEGATIVE_INFINITY);
}
- maxBoostAtt.setMaxNonCompetitiveBoost(variantsQ.size() >= MAX_VARIANTS_PER_TERM ? minScore : Float.NEGATIVE_INFINITY);
- }
- if (numVariants > 0) {
- int avgDf = totalVariantDocFreqs / numVariants;
- if (df == 0)//no direct match we can use as df for all variants
- {
- df = avgDf; //use avg df of all variants
- }
+ if (numVariants > 0) {
+ int avgDf = totalVariantDocFreqs / numVariants;
+ if (df == 0)//no direct match we can use as df for all variants
+ {
+ df = avgDf; //use avg df of all variants
+ }
- // take the top variants (scored by edit distance) and reset the score
- // to include an IDF factor then add to the global queue for ranking
- // overall top query terms
- int size = variantsQ.size();
- for (int i = 0; i < size; i++) {
- ScoreTerm st = variantsQ.pop();
- st.score = (st.score * st.score) * sim.idf(df, corpusNumDocs);
- q.insertWithOverflow(st);
+ // take the top variants (scored by edit distance) and reset the score
+ // to include an IDF factor then add to the global queue for ranking
+ // overall top query terms
+ int size = variantsQ.size();
+ for (int i = 0; i < size; i++) {
+ ScoreTerm st = variantsQ.pop();
+ st.score = (st.score * st.score) * sim.idf(df, corpusNumDocs);
+ q.insertWithOverflow(st);
+ }
}
}
}
+ ts.end();
+ } finally {
+ IOUtils.closeWhileHandlingException(ts);
}
- ts.end();
- ts.close();
}
@Override