You are viewing a plain text version of this content. The canonical link for it is here.
Posted to java-commits@lucene.apache.org by us...@apache.org on 2010/02/10 14:36:04 UTC
svn commit: r908496 [1/6] - in /lucene/java/trunk: ./
src/test/org/apache/lucene/ src/test/org/apache/lucene/analysis/
src/test/org/apache/lucene/collation/ src/test/org/apache/lucene/document/
src/test/org/apache/lucene/index/ src/test/org/apache/luce...
Author: uschindler
Date: Wed Feb 10 13:35:57 2010
New Revision: 908496
URL: http://svn.apache.org/viewvc?rev=908496&view=rev
Log:
LUCENE-2248: Change core tests to use a global Version constant
Modified:
lucene/java/trunk/CHANGES.txt
lucene/java/trunk/src/test/org/apache/lucene/TestDemo.java
lucene/java/trunk/src/test/org/apache/lucene/TestMergeSchedulerExternal.java
lucene/java/trunk/src/test/org/apache/lucene/TestSearch.java
lucene/java/trunk/src/test/org/apache/lucene/TestSearchForDuplicates.java
lucene/java/trunk/src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java
lucene/java/trunk/src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java
lucene/java/trunk/src/test/org/apache/lucene/analysis/TestAnalyzers.java
lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCharArrayMap.java
lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCharArraySet.java
lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCharTokenizers.java
lucene/java/trunk/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java
lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java
lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordMarkerTokenFilter.java
lucene/java/trunk/src/test/org/apache/lucene/analysis/TestLengthFilter.java
lucene/java/trunk/src/test/org/apache/lucene/analysis/TestMappingCharFilter.java
lucene/java/trunk/src/test/org/apache/lucene/analysis/TestPerFieldAnalzyerWrapper.java
lucene/java/trunk/src/test/org/apache/lucene/analysis/TestPorterStemFilter.java
lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java
lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopAnalyzer.java
lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopFilter.java
lucene/java/trunk/src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java
lucene/java/trunk/src/test/org/apache/lucene/collation/CollationTestBase.java
lucene/java/trunk/src/test/org/apache/lucene/document/TestBinaryDocument.java
lucene/java/trunk/src/test/org/apache/lucene/document/TestDocument.java
lucene/java/trunk/src/test/org/apache/lucene/index/DocHelper.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestAtomicUpdate.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestCheckIndex.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestCrash.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestDeletionPolicy.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestDirectoryReader.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestDoc.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestDocumentWriter.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestFieldsReader.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestFilterIndexReader.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReader.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReaderClone.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexReaderReopen.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterLockRelease.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterMerging.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterReader.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestLazyBug.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestLazyProxSkipping.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestNorms.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestOmitTf.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestParallelReader.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestParallelTermEnum.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestPayloads.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestSegmentTermDocs.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestSegmentTermEnum.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestStressIndexing2.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestThreadedOptimize.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestTransactionRollback.java
lucene/java/trunk/src/test/org/apache/lucene/index/TestTransactions.java
lucene/java/trunk/src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java
lucene/java/trunk/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java
lucene/java/trunk/src/test/org/apache/lucene/queryParser/TestQueryParser.java
lucene/java/trunk/src/test/org/apache/lucene/search/BaseTestRangeFilter.java
lucene/java/trunk/src/test/org/apache/lucene/search/QueryUtils.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestBoolean2.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestBooleanOr.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestBooleanPrefixQuery.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestBooleanQuery.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestBooleanScorer.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestCustomSearcherSort.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestDateFilter.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestDateSort.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestDocBoost.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestDocIdSet.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestElevationComparator.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestExplanations.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestFieldCache.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestFilteredQuery.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestFilteredSearch.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestFuzzyQuery.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestMultiSearcher.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestMultiSearcherRanking.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestNot.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestPhraseQuery.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestPositionIncrement.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestPrefixFilter.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestPrefixQuery.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestQueryTermVector.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestScorerPerf.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestSetNorm.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestSimilarity.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestSimpleExplanations.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestSort.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestSpanQueryFilter.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestTermRangeFilter.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestTermRangeQuery.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestTermScorer.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestTermVectors.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestThreadSafe.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
lucene/java/trunk/src/test/org/apache/lucene/search/TestWildcard.java
lucene/java/trunk/src/test/org/apache/lucene/search/function/FunctionTestSetup.java
lucene/java/trunk/src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java
lucene/java/trunk/src/test/org/apache/lucene/search/payloads/PayloadHelper.java
lucene/java/trunk/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java
lucene/java/trunk/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java
lucene/java/trunk/src/test/org/apache/lucene/search/spans/TestBasics.java
lucene/java/trunk/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java
lucene/java/trunk/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
lucene/java/trunk/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java
lucene/java/trunk/src/test/org/apache/lucene/search/spans/TestSpans.java
lucene/java/trunk/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java
lucene/java/trunk/src/test/org/apache/lucene/search/spans/TestSpansAdvanced2.java
lucene/java/trunk/src/test/org/apache/lucene/store/TestBufferedIndexInput.java
lucene/java/trunk/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java
lucene/java/trunk/src/test/org/apache/lucene/store/TestLockFactory.java
lucene/java/trunk/src/test/org/apache/lucene/store/TestRAMDirectory.java
lucene/java/trunk/src/test/org/apache/lucene/store/TestWindowsMMap.java
lucene/java/trunk/src/test/org/apache/lucene/util/LuceneTestCase.java
lucene/java/trunk/src/test/org/apache/lucene/util/LuceneTestCaseJ4.java
lucene/java/trunk/src/test/org/apache/lucene/util/TestCharacterUtils.java
lucene/java/trunk/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java
Modified: lucene/java/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/java/trunk/CHANGES.txt?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/CHANGES.txt (original)
+++ lucene/java/trunk/CHANGES.txt Wed Feb 10 13:35:57 2010
@@ -233,6 +233,10 @@
* LUCENE-2207, LUCENE-2219: Improve BaseTokenStreamTestCase to check if
end() is implemented correctly. (Koji Sekiguchi, Robert Muir)
+* LUCENE-2248, LUCENE-2251: Refactor tests to not use Version.LUCENE_CURRENT,
+ but instead use a global static value from LuceneTestCase(J4), that
+ contains the release version. (Uwe Schindler, Simon Willnauer)
+
======================= Release 3.0.0 2009-11-25 =======================
Changes in backwards compatibility policy
Modified: lucene/java/trunk/src/test/org/apache/lucene/TestDemo.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/TestDemo.java?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/TestDemo.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/TestDemo.java Wed Feb 10 13:35:57 2010
@@ -32,7 +32,6 @@
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
/**
* A very simple demo used in the API documentation (src/java/overview.html).
@@ -44,7 +43,7 @@
public void testDemo() throws IOException, ParseException {
- Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
// Store the index in memory:
Directory directory = new RAMDirectory();
@@ -62,7 +61,7 @@
// Now search the index:
IndexSearcher isearcher = new IndexSearcher(directory, true); // read-only=true
// Parse a simple query that searches for "text":
- QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, "fieldname", analyzer);
+ QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "fieldname", analyzer);
Query query = parser.parse("text");
ScoreDoc[] hits = isearcher.search(query, null, 1000).scoreDocs;
assertEquals(1, hits.length);
Modified: lucene/java/trunk/src/test/org/apache/lucene/TestMergeSchedulerExternal.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/TestMergeSchedulerExternal.java?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/TestMergeSchedulerExternal.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/TestMergeSchedulerExternal.java Wed Feb 10 13:35:57 2010
@@ -18,7 +18,6 @@
*/
import java.io.IOException;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.store.Directory;
@@ -96,7 +95,7 @@
Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
doc.add(idField);
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
MyMergeScheduler ms = new MyMergeScheduler();
writer.setMergeScheduler(ms);
writer.setMaxBufferedDocs(2);
Modified: lucene/java/trunk/src/test/org/apache/lucene/TestSearch.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/TestSearch.java?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/TestSearch.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/TestSearch.java Wed Feb 10 13:35:57 2010
@@ -22,7 +22,6 @@
import java.io.StringWriter;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import junit.framework.TestSuite;
import junit.textui.TestRunner;
@@ -74,7 +73,7 @@
throws Exception
{
Directory directory = new RAMDirectory();
- Analyzer analyzer = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new SimpleAnalyzer(TEST_VERSION_CURRENT);
IndexWriter writer = new IndexWriter(directory, analyzer, true,
IndexWriter.MaxFieldLength.LIMITED);
@@ -108,7 +107,7 @@
};
ScoreDoc[] hits = null;
- QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, "contents", analyzer);
+ QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "contents", analyzer);
parser.setPhraseSlop(4);
for (int j = 0; j < queries.length; j++) {
Query query = parser.parse(queries[j]);
Modified: lucene/java/trunk/src/test/org/apache/lucene/TestSearchForDuplicates.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/TestSearchForDuplicates.java?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/TestSearchForDuplicates.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/TestSearchForDuplicates.java Wed Feb 10 13:35:57 2010
@@ -27,8 +27,6 @@
import org.apache.lucene.index.*;
import org.apache.lucene.search.*;
import org.apache.lucene.queryParser.*;
-import org.apache.lucene.util.Version;
-
import org.apache.lucene.util.LuceneTestCase;
import junit.framework.TestSuite;
import junit.textui.TestRunner;
@@ -79,7 +77,7 @@
private void doTest(PrintWriter out, boolean useCompoundFiles) throws Exception {
Directory directory = new RAMDirectory();
- Analyzer analyzer = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new SimpleAnalyzer(TEST_VERSION_CURRENT);
IndexWriter writer = new IndexWriter(directory, analyzer, true,
IndexWriter.MaxFieldLength.LIMITED);
@@ -98,7 +96,7 @@
// try a search without OR
Searcher searcher = new IndexSearcher(directory, true);
- QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, PRIORITY_FIELD, analyzer);
+ QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, PRIORITY_FIELD, analyzer);
Query query = parser.parse(HIGH_PRIORITY);
out.println("Query: " + query.toString(PRIORITY_FIELD));
@@ -113,7 +111,7 @@
searcher = new IndexSearcher(directory, true);
hits = null;
- parser = new QueryParser(Version.LUCENE_CURRENT, PRIORITY_FIELD, analyzer);
+ parser = new QueryParser(TEST_VERSION_CURRENT, PRIORITY_FIELD, analyzer);
query = parser.parse(HIGH_PRIORITY + " OR " + MED_PRIORITY);
out.println("Query: " + query.toString(PRIORITY_FIELD));
Modified: lucene/java/trunk/src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java Wed Feb 10 13:35:57 2010
@@ -67,7 +67,7 @@
Directory dir = new MockRAMDirectory();
SnapshotDeletionPolicy dp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
- IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
// Force frequent flushes
writer.setMaxBufferedDocs(2);
Document doc = new Document();
@@ -83,7 +83,7 @@
writer.close();
copyFiles(dir, cp);
- writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
+ writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
copyFiles(dir, cp);
for(int i=0;i<7;i++) {
writer.addDocument(doc);
@@ -95,7 +95,7 @@
writer.close();
copyFiles(dir, cp);
dp.release();
- writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
+ writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
writer.close();
try {
copyFiles(dir, cp);
@@ -111,7 +111,7 @@
final long stopTime = System.currentTimeMillis() + 1000;
SnapshotDeletionPolicy dp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
- final IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
+ final IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
// Force frequent flushes
writer.setMaxBufferedDocs(2);
Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java Wed Feb 10 13:35:57 2010
@@ -18,8 +18,6 @@
*/
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.util.Version;
-
import java.io.StringReader;
import java.util.List;
import java.util.ArrayList;
@@ -29,7 +27,7 @@
// testLain1Accents() is a copy of TestLatin1AccentFilter.testU().
public void testLatin1Accents() throws Exception {
- TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader
+ TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader
("Des mot clés à LA CHAÃNE à à à à à Ã
à à à à à à à à à à IJ à Ã"
+" à à à à à à Šà à à à à à Ÿ à á â ã ä å æ ç è é ê ë ì à î ï ij"
+" ð ñ ò ó ô õ ö ø Šà þ ù ú û ü ý ÿ ï¬ ï¬"));
@@ -1890,7 +1888,7 @@
expectedOutputTokens.add(expected.toString());
}
- TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(inputText.toString()));
+ TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(inputText.toString()));
ASCIIFoldingFilter filter = new ASCIIFoldingFilter(stream);
TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
Iterator<String> expectedIter = expectedOutputTokens.iterator();
Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestAnalyzers.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestAnalyzers.java?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestAnalyzers.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestAnalyzers.java Wed Feb 10 13:35:57 2010
@@ -26,7 +26,6 @@
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.index.Payload;
-import org.apache.lucene.util.Version;
public class TestAnalyzers extends BaseTokenStreamTestCase {
@@ -35,7 +34,7 @@
}
public void testSimple() throws Exception {
- Analyzer a = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(a, "foo bar FOO BAR",
new String[] { "foo", "bar", "foo", "bar" });
assertAnalyzesTo(a, "foo bar . FOO <> BAR",
@@ -55,7 +54,7 @@
}
public void testNull() throws Exception {
- Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(a, "foo bar FOO BAR",
new String[] { "foo", "bar", "FOO", "BAR" });
assertAnalyzesTo(a, "foo bar . FOO <> BAR",
@@ -75,7 +74,7 @@
}
public void testStop() throws Exception {
- Analyzer a = new StopAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new StopAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(a, "foo bar FOO BAR",
new String[] { "foo", "bar", "foo", "bar" });
assertAnalyzesTo(a, "foo a bar such FOO THESE BAR",
@@ -97,11 +96,11 @@
public void testPayloadCopy() throws IOException {
String s = "how now brown cow";
TokenStream ts;
- ts = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(s));
+ ts = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(s));
ts = new PayloadSetter(ts);
verifyPayload(ts);
- ts = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(s));
+ ts = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(s));
ts = new PayloadSetter(ts);
verifyPayload(ts);
}
@@ -122,12 +121,12 @@
private static class MyStandardAnalyzer extends StandardAnalyzer {
public MyStandardAnalyzer() {
- super(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ super(TEST_VERSION_CURRENT);
}
@Override
public TokenStream tokenStream(String field, Reader reader) {
- return new WhitespaceAnalyzer(Version.LUCENE_CURRENT).tokenStream(field, reader);
+ return new WhitespaceAnalyzer(TEST_VERSION_CURRENT).tokenStream(field, reader);
}
}
@@ -144,8 +143,8 @@
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
- return new LowerCaseFilter(Version.LUCENE_CURRENT,
- new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader));
+ return new LowerCaseFilter(TEST_VERSION_CURRENT,
+ new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader));
}
}
@@ -192,9 +191,9 @@
public void testLowerCaseFilterLowSurrogateLeftover() throws IOException {
// test if the limit of the termbuffer is correctly used with supplementary
// chars
- WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT,
+ WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT,
new StringReader("BogustermBogusterm\udc16"));
- LowerCaseFilter filter = new LowerCaseFilter(Version.LUCENE_CURRENT,
+ LowerCaseFilter filter = new LowerCaseFilter(TEST_VERSION_CURRENT,
tokenizer);
assertTokenStreamContents(filter, new String[] {"bogustermbogusterm\udc16"});
filter.reset();
Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java Wed Feb 10 13:35:57 2010
@@ -31,14 +31,13 @@
import org.apache.lucene.index.TermPositions;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
public class TestCachingTokenFilter extends BaseTokenStreamTestCase {
private String[] tokens = new String[] {"term1", "term2", "term3", "term2"};
public void testCaching() throws IOException {
Directory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
TokenStream stream = new TokenStream() {
private int index = 0;
Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCharArrayMap.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCharArrayMap.java?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCharArrayMap.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCharArrayMap.java Wed Feb 10 13:35:57 2010
@@ -19,13 +19,12 @@
import java.util.*;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
public class TestCharArrayMap extends LuceneTestCase {
Random r = newRandom();
public void doRandom(int iter, boolean ignoreCase) {
- CharArrayMap<Integer> map = new CharArrayMap<Integer>(Version.LUCENE_CURRENT, 1, ignoreCase);
+ CharArrayMap<Integer> map = new CharArrayMap<Integer>(TEST_VERSION_CURRENT, 1, ignoreCase);
HashMap<String,Integer> hmap = new HashMap<String,Integer>();
char[] key;
@@ -63,7 +62,7 @@
}
public void testMethods() {
- CharArrayMap<Integer> cm = new CharArrayMap<Integer>(Version.LUCENE_CURRENT, 2, false);
+ CharArrayMap<Integer> cm = new CharArrayMap<Integer>(TEST_VERSION_CURRENT, 2, false);
HashMap<String,Integer> hm = new HashMap<String,Integer>();
hm.put("foo",1);
hm.put("bar",2);
@@ -131,7 +130,7 @@
}
public void testModifyOnUnmodifiable(){
- CharArrayMap<Integer> map = new CharArrayMap<Integer>(Version.LUCENE_CURRENT, 2, false);
+ CharArrayMap<Integer> map = new CharArrayMap<Integer>(TEST_VERSION_CURRENT, 2, false);
map.put("foo",1);
map.put("bar",2);
final int size = map.size();
@@ -228,7 +227,7 @@
}
public void testToString() {
- CharArrayMap<Integer> cm = new CharArrayMap<Integer>(Version.LUCENE_CURRENT, Collections.singletonMap("test",1), false);
+ CharArrayMap<Integer> cm = new CharArrayMap<Integer>(TEST_VERSION_CURRENT, Collections.singletonMap("test",1), false);
assertEquals("[test]",cm.keySet().toString());
assertEquals("[1]",cm.values().toString());
assertEquals("[test=1]",cm.entrySet().toString());
Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCharArraySet.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCharArraySet.java?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCharArraySet.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCharArraySet.java Wed Feb 10 13:35:57 2010
@@ -41,7 +41,7 @@
public void testRehash() throws Exception {
- CharArraySet cas = new CharArraySet(Version.LUCENE_CURRENT, 0, true);
+ CharArraySet cas = new CharArraySet(TEST_VERSION_CURRENT, 0, true);
for(int i=0;i<TEST_STOP_WORDS.length;i++)
cas.add(TEST_STOP_WORDS[i]);
assertEquals(TEST_STOP_WORDS.length, cas.size());
@@ -52,7 +52,7 @@
public void testNonZeroOffset() {
String[] words={"Hello","World","this","is","a","test"};
char[] findme="xthisy".toCharArray();
- CharArraySet set=new CharArraySet(Version.LUCENE_CURRENT, 10,true);
+ CharArraySet set=new CharArraySet(TEST_VERSION_CURRENT, 10,true);
set.addAll(Arrays.asList(words));
assertTrue(set.contains(findme, 1, 4));
assertTrue(set.contains(new String(findme,1,4)));
@@ -64,7 +64,7 @@
}
public void testObjectContains() {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 10, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 10, true);
Integer val = Integer.valueOf(1);
set.add(val);
assertTrue(set.contains(val));
@@ -80,7 +80,7 @@
}
public void testClear(){
- CharArraySet set=new CharArraySet(Version.LUCENE_CURRENT, 10,true);
+ CharArraySet set=new CharArraySet(TEST_VERSION_CURRENT, 10,true);
set.addAll(Arrays.asList(TEST_STOP_WORDS));
assertEquals("Not all words added", TEST_STOP_WORDS.length, set.size());
set.clear();
@@ -94,7 +94,7 @@
}
public void testModifyOnUnmodifiable(){
- CharArraySet set=new CharArraySet(Version.LUCENE_CURRENT, 10, true);
+ CharArraySet set=new CharArraySet(TEST_VERSION_CURRENT, 10, true);
set.addAll(Arrays.asList(TEST_STOP_WORDS));
final int size = set.size();
set = CharArraySet.unmodifiableSet(set);
@@ -150,7 +150,7 @@
// current key (now a char[]) on a Set<String> would not hit any element of the CAS and therefor never call
// remove() on the iterator
try{
- set.removeAll(new CharArraySet(Version.LUCENE_CURRENT, Arrays.asList(TEST_STOP_WORDS), true));
+ set.removeAll(new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(TEST_STOP_WORDS), true));
fail("Modified unmodifiable set");
}catch (UnsupportedOperationException e) {
// expected
@@ -158,7 +158,7 @@
}
try{
- set.retainAll(new CharArraySet(Version.LUCENE_CURRENT, Arrays.asList(NOT_IN_SET), true));
+ set.retainAll(new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(NOT_IN_SET), true));
fail("Modified unmodifiable set");
}catch (UnsupportedOperationException e) {
// expected
@@ -179,7 +179,7 @@
}
public void testUnmodifiableSet(){
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 10,true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 10,true);
set.addAll(Arrays.asList(TEST_STOP_WORDS));
set.add(Integer.valueOf(1));
final int size = set.size();
@@ -209,7 +209,7 @@
"\ud801\udc1c\ud801\udc1cCDE", "A\ud801\udc1cB"};
String[] lowerArr = new String[] {"abc\ud801\udc44",
"\ud801\udc44\ud801\udc44cde", "a\ud801\udc44b"};
- CharArraySet set = new CharArraySet(Version.LUCENE_31, Arrays.asList(TEST_STOP_WORDS), true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(TEST_STOP_WORDS), true);
for (String upper : upperArr) {
set.add(upper);
}
@@ -217,7 +217,7 @@
assertTrue(String.format(missing, upperArr[i]), set.contains(upperArr[i]));
assertTrue(String.format(missing, lowerArr[i]), set.contains(lowerArr[i]));
}
- set = new CharArraySet(Version.LUCENE_31, Arrays.asList(TEST_STOP_WORDS), false);
+ set = new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(TEST_STOP_WORDS), false);
for (String upper : upperArr) {
set.add(upper);
}
@@ -235,7 +235,7 @@
String[] lowerArr = new String[] { "abc\uD800", "abc\uD800efg",
"\uD800efg", "\uD800\ud801\udc44b" };
- CharArraySet set = new CharArraySet(Version.LUCENE_31, Arrays
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, Arrays
.asList(TEST_STOP_WORDS), true);
for (String upper : upperArr) {
set.add(upper);
@@ -244,7 +244,7 @@
assertTrue(String.format(missing, upperArr[i]), set.contains(upperArr[i]));
assertTrue(String.format(missing, lowerArr[i]), set.contains(lowerArr[i]));
}
- set = new CharArraySet(Version.LUCENE_31, Arrays.asList(TEST_STOP_WORDS),
+ set = new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(TEST_STOP_WORDS),
false);
for (String upper : upperArr) {
set.add(upper);
@@ -328,8 +328,8 @@
}
public void testCopyCharArraySetBWCompat() {
- CharArraySet setIngoreCase = new CharArraySet(Version.LUCENE_CURRENT, 10, true);
- CharArraySet setCaseSensitive = new CharArraySet(Version.LUCENE_CURRENT, 10, false);
+ CharArraySet setIngoreCase = new CharArraySet(TEST_VERSION_CURRENT, 10, true);
+ CharArraySet setCaseSensitive = new CharArraySet(TEST_VERSION_CURRENT, 10, false);
List<String> stopwords = Arrays.asList(TEST_STOP_WORDS);
List<String> stopwordsUpper = new ArrayList<String>();
@@ -375,8 +375,8 @@
* Test the static #copy() function with a CharArraySet as a source
*/
public void testCopyCharArraySet() {
- CharArraySet setIngoreCase = new CharArraySet(Version.LUCENE_CURRENT, 10, true);
- CharArraySet setCaseSensitive = new CharArraySet(Version.LUCENE_CURRENT, 10, false);
+ CharArraySet setIngoreCase = new CharArraySet(TEST_VERSION_CURRENT, 10, true);
+ CharArraySet setCaseSensitive = new CharArraySet(TEST_VERSION_CURRENT, 10, false);
List<String> stopwords = Arrays.asList(TEST_STOP_WORDS);
List<String> stopwordsUpper = new ArrayList<String>();
@@ -388,8 +388,8 @@
setCaseSensitive.addAll(Arrays.asList(TEST_STOP_WORDS));
setCaseSensitive.add(Integer.valueOf(1));
- CharArraySet copy = CharArraySet.copy(Version.LUCENE_CURRENT, setIngoreCase);
- CharArraySet copyCaseSens = CharArraySet.copy(Version.LUCENE_CURRENT, setCaseSensitive);
+ CharArraySet copy = CharArraySet.copy(TEST_VERSION_CURRENT, setIngoreCase);
+ CharArraySet copyCaseSens = CharArraySet.copy(TEST_VERSION_CURRENT, setCaseSensitive);
assertEquals(setIngoreCase.size(), copy.size());
assertEquals(setCaseSensitive.size(), copy.size());
@@ -431,7 +431,7 @@
}
set.addAll(Arrays.asList(TEST_STOP_WORDS));
- CharArraySet copy = CharArraySet.copy(Version.LUCENE_CURRENT, set);
+ CharArraySet copy = CharArraySet.copy(TEST_VERSION_CURRENT, set);
assertEquals(set.size(), copy.size());
assertEquals(set.size(), copy.size());
@@ -461,7 +461,7 @@
*/
public void testCopyEmptySet() {
assertSame(CharArraySet.EMPTY_SET,
- CharArraySet.copy(Version.LUCENE_CURRENT, CharArraySet.EMPTY_SET));
+ CharArraySet.copy(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET));
}
/**
@@ -483,7 +483,7 @@
* Test for NPE
*/
public void testContainsWithNull() {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
try {
set.contains((char[]) null, 0, 10);
fail("null value must raise NPE");
@@ -506,7 +506,7 @@
assertTrue("in 3.0 version, iterator should be CharArraySetIterator",
((Iterator) CharArraySet.copy(Version.LUCENE_30, hset).iterator()) instanceof CharArraySet.CharArraySetIterator);
- CharArraySet set = CharArraySet.copy(Version.LUCENE_CURRENT, hset);
+ CharArraySet set = CharArraySet.copy(TEST_VERSION_CURRENT, hset);
assertFalse("in current version, iterator should not be CharArraySetIterator",
((Iterator) set.iterator()) instanceof CharArraySet.CharArraySetIterator);
@@ -525,7 +525,7 @@
}
public void testToString() {
- CharArraySet set = CharArraySet.copy(Version.LUCENE_CURRENT, Collections.singleton("test"));
+ CharArraySet set = CharArraySet.copy(TEST_VERSION_CURRENT, Collections.singleton("test"));
assertEquals("[test]", set.toString());
set.add("test2");
assertTrue(set.toString().contains(", "));
Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCharTokenizers.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCharTokenizers.java?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCharTokenizers.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCharTokenizers.java Wed Feb 10 13:35:57 2010
@@ -46,7 +46,7 @@
// internal buffer size is 1024 make sure we have a surrogate pair right at the border
builder.insert(1023, "\ud801\udc1c");
LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(
- Version.LUCENE_CURRENT, new StringReader(builder.toString()));
+ TEST_VERSION_CURRENT, new StringReader(builder.toString()));
assertTokenStreamContents(tokenizer, builder.toString().toLowerCase().split(" "));
}
@@ -64,7 +64,7 @@
}
builder.append("\ud801\udc1cabc");
LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(
- Version.LUCENE_CURRENT, new StringReader(builder.toString()));
+ TEST_VERSION_CURRENT, new StringReader(builder.toString()));
assertTokenStreamContents(tokenizer, new String[] {builder.toString().toLowerCase()});
}
}
@@ -79,7 +79,7 @@
builder.append("A");
}
LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(
- Version.LUCENE_CURRENT, new StringReader(builder.toString() + builder.toString()));
+ TEST_VERSION_CURRENT, new StringReader(builder.toString() + builder.toString()));
assertTokenStreamContents(tokenizer, new String[] {builder.toString().toLowerCase(), builder.toString().toLowerCase()});
}
@@ -94,13 +94,13 @@
}
builder.append("\ud801\udc1c");
LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(
- Version.LUCENE_CURRENT, new StringReader(builder.toString() + builder.toString()));
+ TEST_VERSION_CURRENT, new StringReader(builder.toString() + builder.toString()));
assertTokenStreamContents(tokenizer, new String[] {builder.toString().toLowerCase(), builder.toString().toLowerCase()});
}
public void testLowerCaseTokenizer() throws IOException {
StringReader reader = new StringReader("Tokenizer \ud801\udc1ctest");
- LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(Version.LUCENE_CURRENT,
+ LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(TEST_VERSION_CURRENT,
reader);
assertTokenStreamContents(tokenizer, new String[] { "tokenizer",
"\ud801\udc44test" });
@@ -115,7 +115,7 @@
public void testWhitespaceTokenizer() throws IOException {
StringReader reader = new StringReader("Tokenizer \ud801\udc1ctest");
- WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT,
+ WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT,
reader);
assertTokenStreamContents(tokenizer, new String[] { "Tokenizer",
"\ud801\udc1ctest" });
@@ -132,7 +132,7 @@
public void testIsTokenCharCharInSubclass() {
new TestingCharTokenizer(Version.LUCENE_30, new StringReader(""));
try {
- new TestingCharTokenizer(Version.LUCENE_CURRENT, new StringReader(""));
+ new TestingCharTokenizer(TEST_VERSION_CURRENT, new StringReader(""));
fail("version 3.1 is not permitted if char based method is implemented");
} catch (IllegalArgumentException e) {
// expected
@@ -142,7 +142,7 @@
public void testNormalizeCharInSubclass() {
new TestingCharTokenizerNormalize(Version.LUCENE_30, new StringReader(""));
try {
- new TestingCharTokenizerNormalize(Version.LUCENE_CURRENT,
+ new TestingCharTokenizerNormalize(TEST_VERSION_CURRENT,
new StringReader(""));
fail("version 3.1 is not permitted if char based method is implemented");
} catch (IllegalArgumentException e) {
@@ -154,7 +154,7 @@
new TestingCharTokenizerNormalizeIsTokenChar(Version.LUCENE_30,
new StringReader(""));
try {
- new TestingCharTokenizerNormalizeIsTokenChar(Version.LUCENE_CURRENT,
+ new TestingCharTokenizerNormalizeIsTokenChar(TEST_VERSION_CURRENT,
new StringReader(""));
fail("version 3.1 is not permitted if char based method is implemented");
} catch (IllegalArgumentException e) {
Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java Wed Feb 10 13:35:57 2010
@@ -18,13 +18,11 @@
*/
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.util.Version;
-
import java.io.StringReader;
public class TestISOLatin1AccentFilter extends BaseTokenStreamTestCase {
public void testU() throws Exception {
- TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("Des mot clés À LA CHAÎNE À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï Ĳ Ð Ñ Ò Ó Ô Õ Ö Ø Œ Þ Ù Ú Û Ü Ý Ÿ à á â ã ä å æ ç è é ê ë ì í î ï ĳ ð ñ ò ó ô õ ö ø œ ß þ ù ú û ü ý ÿ ﬁ ﬂ"));
+ TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("Des mot clés À LA CHAÎNE À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï Ĳ Ð Ñ Ò Ó Ô Õ Ö Ø Œ Þ Ù Ú Û Ü Ý Ÿ à á â ã ä å æ ç è é ê ë ì í î ï ĳ ð ñ ò ó ô õ ö ø œ ß þ ù ú û ü ý ÿ ﬁ ﬂ"));
ISOLatin1AccentFilter filter = new ISOLatin1AccentFilter(stream);
TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
assertTermEquals("Des", filter, termAtt);
Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java Wed Feb 10 13:35:57 2010
@@ -31,7 +31,6 @@
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
@@ -43,7 +42,7 @@
super.setUp();
directory = new RAMDirectory();
IndexWriter writer = new IndexWriter(directory,
- new SimpleAnalyzer(Version.LUCENE_CURRENT),
+ new SimpleAnalyzer(TEST_VERSION_CURRENT),
true, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
@@ -57,10 +56,10 @@
}
public void testPerFieldAnalyzer() throws Exception {
- PerFieldAnalyzerWrapper analyzer = new PerFieldAnalyzerWrapper(new SimpleAnalyzer(Version.LUCENE_CURRENT));
+ PerFieldAnalyzerWrapper analyzer = new PerFieldAnalyzerWrapper(new SimpleAnalyzer(TEST_VERSION_CURRENT));
analyzer.addAnalyzer("partnum", new KeywordAnalyzer());
- QueryParser queryParser = new QueryParser(Version.LUCENE_CURRENT, "description", analyzer);
+ QueryParser queryParser = new QueryParser(TEST_VERSION_CURRENT, "description", analyzer);
Query query = queryParser.parse("partnum:Q36 AND SPACE");
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordMarkerTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordMarkerTokenFilter.java?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordMarkerTokenFilter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordMarkerTokenFilter.java Wed Feb 10 13:35:57 2010
@@ -7,7 +7,6 @@
import org.apache.lucene.analysis.tokenattributes.KeywordAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.util.Version;
import org.junit.Test;
/**
@@ -34,21 +33,21 @@
@Test
public void testIncrementToken() throws IOException {
- CharArraySet set = new CharArraySet(Version.LUCENE_31, 5, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 5, true);
set.add("lucenefox");
String[] output = new String[] { "the", "quick", "brown", "LuceneFox",
"jumps" };
assertTokenStreamContents(new LowerCaseFilterMock(
- new KeywordMarkerTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ new KeywordMarkerTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
"The quIck browN LuceneFox Jumps")), set)), output);
Set<String> jdkSet = new HashSet<String>();
jdkSet.add("LuceneFox");
assertTokenStreamContents(new LowerCaseFilterMock(
- new KeywordMarkerTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ new KeywordMarkerTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
"The quIck browN LuceneFox Jumps")), jdkSet)), output);
Set<?> set2 = set;
assertTokenStreamContents(new LowerCaseFilterMock(
- new KeywordMarkerTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ new KeywordMarkerTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
"The quIck browN LuceneFox Jumps")), set2)), output);
}
Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestLengthFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestLengthFilter.java?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestLengthFilter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestLengthFilter.java Wed Feb 10 13:35:57 2010
@@ -18,14 +18,12 @@
*/
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.util.Version;
-
import java.io.StringReader;
public class TestLengthFilter extends BaseTokenStreamTestCase {
public void testFilter() throws Exception {
- TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT,
+ TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT,
new StringReader("short toolong evenmuchlongertext a ab toolong foo"));
LengthFilter filter = new LengthFilter(stream, 2, 6);
TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestMappingCharFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestMappingCharFilter.java?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestMappingCharFilter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestMappingCharFilter.java Wed Feb 10 13:35:57 2010
@@ -19,8 +19,6 @@
import java.io.StringReader;
-import org.apache.lucene.util.Version;
-
public class TestMappingCharFilter extends BaseTokenStreamTestCase {
NormalizeCharMap normMap;
@@ -60,55 +58,55 @@
public void testNothingChange() throws Exception {
CharStream cs = new MappingCharFilter( normMap, new StringReader( "x" ) );
- TokenStream ts = new WhitespaceTokenizer(Version.LUCENE_CURRENT, cs );
+ TokenStream ts = new WhitespaceTokenizer(TEST_VERSION_CURRENT, cs );
assertTokenStreamContents(ts, new String[]{"x"}, new int[]{0}, new int[]{1});
}
public void test1to1() throws Exception {
CharStream cs = new MappingCharFilter( normMap, new StringReader( "h" ) );
- TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+ TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
assertTokenStreamContents(ts, new String[]{"i"}, new int[]{0}, new int[]{1});
}
public void test1to2() throws Exception {
CharStream cs = new MappingCharFilter( normMap, new StringReader( "j" ) );
- TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+ TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
assertTokenStreamContents(ts, new String[]{"jj"}, new int[]{0}, new int[]{1});
}
public void test1to3() throws Exception {
CharStream cs = new MappingCharFilter( normMap, new StringReader( "k" ) );
- TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+ TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
assertTokenStreamContents(ts, new String[]{"kkk"}, new int[]{0}, new int[]{1});
}
public void test2to4() throws Exception {
CharStream cs = new MappingCharFilter( normMap, new StringReader( "ll" ) );
- TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+ TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
assertTokenStreamContents(ts, new String[]{"llll"}, new int[]{0}, new int[]{2});
}
public void test2to1() throws Exception {
CharStream cs = new MappingCharFilter( normMap, new StringReader( "aa" ) );
- TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+ TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
assertTokenStreamContents(ts, new String[]{"a"}, new int[]{0}, new int[]{2});
}
public void test3to1() throws Exception {
CharStream cs = new MappingCharFilter( normMap, new StringReader( "bbb" ) );
- TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+ TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
assertTokenStreamContents(ts, new String[]{"b"}, new int[]{0}, new int[]{3});
}
public void test4to2() throws Exception {
CharStream cs = new MappingCharFilter( normMap, new StringReader( "cccc" ) );
- TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+ TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
assertTokenStreamContents(ts, new String[]{"cc"}, new int[]{0}, new int[]{4});
}
public void test5to0() throws Exception {
CharStream cs = new MappingCharFilter( normMap, new StringReader( "empty" ) );
- TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+ TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
assertTokenStreamContents(ts, new String[0]);
}
@@ -132,7 +130,7 @@
//
public void testTokenStream() throws Exception {
CharStream cs = new MappingCharFilter( normMap, CharReader.get( new StringReader( "h i j k ll cccc bbb aa" ) ) );
- TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+ TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
assertTokenStreamContents(ts,
new String[]{"i","i","jj","kkk","llll","cc","b","a"},
new int[]{0,2,4,6,8,11,16,20},
@@ -153,7 +151,7 @@
public void testChained() throws Exception {
CharStream cs = new MappingCharFilter( normMap,
new MappingCharFilter( normMap, CharReader.get( new StringReader( "aaaa ll h" ) ) ) );
- TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+ TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
assertTokenStreamContents(ts,
new String[]{"a","llllllll","i"},
new int[]{0,5,8},
Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestPerFieldAnalzyerWrapper.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestPerFieldAnalzyerWrapper.java?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestPerFieldAnalzyerWrapper.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestPerFieldAnalzyerWrapper.java Wed Feb 10 13:35:57 2010
@@ -3,7 +3,6 @@
import java.io.StringReader;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.util.Version;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
@@ -26,8 +25,8 @@
public void testPerField() throws Exception {
String text = "Qwerty";
PerFieldAnalyzerWrapper analyzer =
- new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
- analyzer.addAnalyzer("special", new SimpleAnalyzer(Version.LUCENE_CURRENT));
+ new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
+ analyzer.addAnalyzer("special", new SimpleAnalyzer(TEST_VERSION_CURRENT));
TokenStream tokenStream = analyzer.tokenStream("field",
new StringReader(text));
Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestPorterStemFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestPorterStemFilter.java?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestPorterStemFilter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestPorterStemFilter.java Wed Feb 10 13:35:57 2010
@@ -25,8 +25,6 @@
import java.io.StringReader;
import java.util.zip.ZipFile;
-import org.apache.lucene.util.Version;
-
/**
* Test the PorterStemFilter with Martin Porter's test data.
*/
@@ -60,9 +58,9 @@
}
public void testWithKeywordAttribute() throws IOException {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("yourselves");
- Tokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("yourselves yours"));
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("yourselves yours"));
TokenStream filter = new PorterStemFilter(new KeywordMarkerTokenFilter(tokenizer, set));
assertTokenStreamContents(filter, new String[] {"yourselves", "your"});
}
Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java Wed Feb 10 13:35:57 2010
@@ -23,16 +23,16 @@
public class TestStandardAnalyzer extends BaseTokenStreamTestCase {
- private Analyzer a = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ private Analyzer a = new StandardAnalyzer(TEST_VERSION_CURRENT);
public void testMaxTermLength() throws Exception {
- StandardAnalyzer sa = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ StandardAnalyzer sa = new StandardAnalyzer(TEST_VERSION_CURRENT);
sa.setMaxTokenLength(5);
assertAnalyzesTo(sa, "ab cd toolong xy z", new String[]{"ab", "cd", "xy", "z"});
}
public void testMaxTermLength2() throws Exception {
- StandardAnalyzer sa = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ StandardAnalyzer sa = new StandardAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(sa, "ab cd toolong xy z", new String[]{"ab", "cd", "toolong", "xy", "z"});
sa.setMaxTokenLength(5);
@@ -96,7 +96,7 @@
public void testLucene1140() throws Exception {
try {
- StandardAnalyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ StandardAnalyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(analyzer, "www.nutch.org.", new String[]{ "www.nutch.org" }, new String[] { "<HOST>" });
} catch (NullPointerException e) {
fail("Should not throw an NPE and it did");
@@ -106,7 +106,7 @@
public void testDomainNames() throws Exception {
// Current lucene should not show the bug
- StandardAnalyzer a2 = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ StandardAnalyzer a2 = new StandardAnalyzer(TEST_VERSION_CURRENT);
// domain names
assertAnalyzesTo(a2, "www.nutch.org", new String[]{"www.nutch.org"});
Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopAnalyzer.java?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopAnalyzer.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopAnalyzer.java Wed Feb 10 13:35:57 2010
@@ -29,7 +29,7 @@
public class TestStopAnalyzer extends BaseTokenStreamTestCase {
- private StopAnalyzer stop = new StopAnalyzer(Version.LUCENE_CURRENT);
+ private StopAnalyzer stop = new StopAnalyzer(TEST_VERSION_CURRENT);
private Set<Object> inValidTokens = new HashSet<Object>();
public TestStopAnalyzer(String s) {
@@ -82,7 +82,7 @@
stopWordsSet.add("good");
stopWordsSet.add("test");
stopWordsSet.add("analyzer");
- StopAnalyzer newStop = new StopAnalyzer(Version.LUCENE_CURRENT, stopWordsSet);
+ StopAnalyzer newStop = new StopAnalyzer(TEST_VERSION_CURRENT, stopWordsSet);
StringReader reader = new StringReader("This is a good test of the english stop analyzer with positions");
int expectedIncr[] = { 1, 1, 1, 3, 1, 1, 1, 2, 1};
TokenStream stream = newStop.tokenStream("test", reader);
Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopFilter.java?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopFilter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopFilter.java Wed Feb 10 13:35:57 2010
@@ -38,7 +38,7 @@
public void testExactCase() throws IOException {
StringReader reader = new StringReader("Now is The Time");
Set<String> stopWords = new HashSet<String>(Arrays.asList("is", "the", "Time"));
- TokenStream stream = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader), stopWords, false);
+ TokenStream stream = new StopFilter(TEST_VERSION_CURRENT, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader), stopWords, false);
final TermAttribute termAtt = stream.getAttribute(TermAttribute.class);
assertTrue(stream.incrementToken());
assertEquals("Now", termAtt.term());
@@ -50,7 +50,7 @@
public void testIgnoreCase() throws IOException {
StringReader reader = new StringReader("Now is The Time");
Set<Object> stopWords = new HashSet<Object>(Arrays.asList( "is", "the", "Time" ));
- TokenStream stream = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader), stopWords, true);
+ TokenStream stream = new StopFilter(TEST_VERSION_CURRENT, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader), stopWords, true);
final TermAttribute termAtt = stream.getAttribute(TermAttribute.class);
assertTrue(stream.incrementToken());
assertEquals("Now", termAtt.term());
@@ -60,8 +60,8 @@
public void testStopFilt() throws IOException {
StringReader reader = new StringReader("Now is The Time");
String[] stopWords = new String[] { "is", "the", "Time" };
- Set<Object> stopSet = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords);
- TokenStream stream = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader), stopSet);
+ Set<Object> stopSet = StopFilter.makeStopSet(TEST_VERSION_CURRENT, stopWords);
+ TokenStream stream = new StopFilter(TEST_VERSION_CURRENT, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader), stopSet);
final TermAttribute termAtt = stream.getAttribute(TermAttribute.class);
assertTrue(stream.incrementToken());
assertEquals("Now", termAtt.term());
@@ -84,14 +84,14 @@
log(sb.toString());
String stopWords[] = a.toArray(new String[0]);
for (int i=0; i<a.size(); i++) log("Stop: "+stopWords[i]);
- Set<Object> stopSet = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords);
+ Set<Object> stopSet = StopFilter.makeStopSet(TEST_VERSION_CURRENT, stopWords);
// with increments
StringReader reader = new StringReader(sb.toString());
- StopFilter stpf = new StopFilter(Version.LUCENE_24, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader), stopSet);
+ StopFilter stpf = new StopFilter(Version.LUCENE_24, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader), stopSet);
doTestStopPositons(stpf,true);
// without increments
reader = new StringReader(sb.toString());
- stpf = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader), stopSet);
+ stpf = new StopFilter(TEST_VERSION_CURRENT, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader), stopSet);
doTestStopPositons(stpf,false);
// with increments, concatenating two stop filters
ArrayList<String> a0 = new ArrayList<String>();
@@ -107,12 +107,12 @@
for (int i=0; i<a0.size(); i++) log("Stop0: "+stopWords0[i]);
String stopWords1[] = a1.toArray(new String[0]);
for (int i=0; i<a1.size(); i++) log("Stop1: "+stopWords1[i]);
- Set<Object> stopSet0 = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords0);
- Set<Object> stopSet1 = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords1);
+ Set<Object> stopSet0 = StopFilter.makeStopSet(TEST_VERSION_CURRENT, stopWords0);
+ Set<Object> stopSet1 = StopFilter.makeStopSet(TEST_VERSION_CURRENT, stopWords1);
reader = new StringReader(sb.toString());
- StopFilter stpf0 = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader), stopSet0); // first part of the set
+ StopFilter stpf0 = new StopFilter(TEST_VERSION_CURRENT, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader), stopSet0); // first part of the set
stpf0.setEnablePositionIncrements(true);
- StopFilter stpf01 = new StopFilter(Version.LUCENE_CURRENT, stpf0, stopSet1); // two stop filters concatenated!
+ StopFilter stpf01 = new StopFilter(TEST_VERSION_CURRENT, stpf0, stopSet1); // two stop filters concatenated!
doTestStopPositons(stpf01,true);
}
Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java Wed Feb 10 13:35:57 2010
@@ -22,8 +22,6 @@
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.English;
-import org.apache.lucene.util.Version;
-
import java.io.IOException;
import java.io.StringReader;
@@ -76,7 +74,7 @@
public void testGeneral() throws IOException {
- final TeeSinkTokenFilter source = new TeeSinkTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer1.toString())));
+ final TeeSinkTokenFilter source = new TeeSinkTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer1.toString())));
final TokenStream sink1 = source.newSinkTokenStream();
final TokenStream sink2 = source.newSinkTokenStream(theFilter);
@@ -90,7 +88,7 @@
}
public void testMultipleSources() throws Exception {
- final TeeSinkTokenFilter tee1 = new TeeSinkTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer1.toString())));
+ final TeeSinkTokenFilter tee1 = new TeeSinkTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer1.toString())));
final TeeSinkTokenFilter.SinkTokenStream dogDetector = tee1.newSinkTokenStream(dogFilter);
final TeeSinkTokenFilter.SinkTokenStream theDetector = tee1.newSinkTokenStream(theFilter);
final TokenStream source1 = new CachingTokenFilter(tee1);
@@ -99,7 +97,7 @@
dogDetector.addAttribute(CheckClearAttributesAttribute.class);
theDetector.addAttribute(CheckClearAttributesAttribute.class);
- final TeeSinkTokenFilter tee2 = new TeeSinkTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer2.toString())));
+ final TeeSinkTokenFilter tee2 = new TeeSinkTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer2.toString())));
tee2.addSinkTokenStream(dogDetector);
tee2.addSinkTokenStream(theDetector);
final TokenStream source2 = tee2;
@@ -111,7 +109,7 @@
assertTokenStreamContents(dogDetector, new String[]{"Dogs", "Dogs"});
source1.reset();
- TokenStream lowerCasing = new LowerCaseFilter(Version.LUCENE_CURRENT, source1);
+ TokenStream lowerCasing = new LowerCaseFilter(TEST_VERSION_CURRENT, source1);
String[] lowerCaseTokens = new String[tokens1.length];
for (int i = 0; i < tokens1.length; i++)
lowerCaseTokens[i] = tokens1[i].toLowerCase();
@@ -133,10 +131,10 @@
buffer.append(English.intToEnglish(i).toUpperCase()).append(' ');
}
//make sure we produce the same tokens
- TeeSinkTokenFilter teeStream = new TeeSinkTokenFilter(new StandardFilter(new StandardTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer.toString()))));
+ TeeSinkTokenFilter teeStream = new TeeSinkTokenFilter(new StandardFilter(new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer.toString()))));
TokenStream sink = teeStream.newSinkTokenStream(new ModuloSinkFilter(100));
teeStream.consumeAllTokens();
- TokenStream stream = new ModuloTokenFilter(new StandardFilter(new StandardTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer.toString()))), 100);
+ TokenStream stream = new ModuloTokenFilter(new StandardFilter(new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer.toString()))), 100);
TermAttribute tfTok = stream.addAttribute(TermAttribute.class);
TermAttribute sinkTok = sink.addAttribute(TermAttribute.class);
for (int i=0; stream.incrementToken(); i++) {
@@ -149,12 +147,12 @@
int tfPos = 0;
long start = System.currentTimeMillis();
for (int i = 0; i < 20; i++) {
- stream = new StandardFilter(new StandardTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer.toString())));
+ stream = new StandardFilter(new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer.toString())));
PositionIncrementAttribute posIncrAtt = stream.getAttribute(PositionIncrementAttribute.class);
while (stream.incrementToken()) {
tfPos += posIncrAtt.getPositionIncrement();
}
- stream = new ModuloTokenFilter(new StandardFilter(new StandardTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer.toString()))), modCounts[j]);
+ stream = new ModuloTokenFilter(new StandardFilter(new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer.toString()))), modCounts[j]);
posIncrAtt = stream.getAttribute(PositionIncrementAttribute.class);
while (stream.incrementToken()) {
tfPos += posIncrAtt.getPositionIncrement();
@@ -166,7 +164,7 @@
//simulate one field with one sink
start = System.currentTimeMillis();
for (int i = 0; i < 20; i++) {
- teeStream = new TeeSinkTokenFilter(new StandardFilter(new StandardTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer.toString()))));
+ teeStream = new TeeSinkTokenFilter(new StandardFilter(new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer.toString()))));
sink = teeStream.newSinkTokenStream(new ModuloSinkFilter(modCounts[j]));
PositionIncrementAttribute posIncrAtt = teeStream.getAttribute(PositionIncrementAttribute.class);
while (teeStream.incrementToken()) {
Modified: lucene/java/trunk/src/test/org/apache/lucene/collation/CollationTestBase.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/collation/CollationTestBase.java?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/collation/CollationTestBase.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/collation/CollationTestBase.java Wed Feb 10 13:35:57 2010
@@ -18,7 +18,6 @@
*/
-import junit.framework.TestCase;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.PerFieldAnalyzerWrapper;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
@@ -38,14 +37,14 @@
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Document;
import org.apache.lucene.util.IndexableBinaryStringTools;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
import java.io.IOException;
import java.nio.CharBuffer;
import java.nio.ByteBuffer;
-public class CollationTestBase extends TestCase {
+public class CollationTestBase extends LuceneTestCase {
protected String firstRangeBeginningOriginal = "\u062F";
protected String firstRangeEndOriginal = "\u0698";
@@ -179,7 +178,7 @@
String usResult) throws Exception {
RAMDirectory indexStore = new RAMDirectory();
PerFieldAnalyzerWrapper analyzer
- = new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ = new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
analyzer.addAnalyzer("US", usAnalyzer);
analyzer.addAnalyzer("France", franceAnalyzer);
analyzer.addAnalyzer("Sweden", swedenAnalyzer);
Modified: lucene/java/trunk/src/test/org/apache/lucene/document/TestBinaryDocument.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/document/TestBinaryDocument.java?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/document/TestBinaryDocument.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/document/TestBinaryDocument.java Wed Feb 10 13:35:57 2010
@@ -59,7 +59,7 @@
/** add the doc to a ram index */
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.addDocument(doc);
writer.close();
@@ -97,7 +97,7 @@
/** add the doc to a ram index */
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.addDocument(doc);
writer.close();
Modified: lucene/java/trunk/src/test/org/apache/lucene/document/TestDocument.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/document/TestDocument.java?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/document/TestDocument.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/document/TestDocument.java Wed Feb 10 13:35:57 2010
@@ -154,7 +154,7 @@
public void testGetValuesForIndexedDocument() throws Exception
{
RAMDirectory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.addDocument(makeDocumentWithFields());
writer.close();
@@ -225,7 +225,7 @@
doc.add(new Field("keyword", "test", Field.Store.YES, Field.Index.NOT_ANALYZED));
RAMDirectory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.addDocument(doc);
field.setValue("id2");
writer.addDocument(doc);
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/DocHelper.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/DocHelper.java?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/DocHelper.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/DocHelper.java Wed Feb 10 13:35:57 2010
@@ -29,7 +29,7 @@
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.Version;
+import static org.apache.lucene.util.LuceneTestCaseJ4.TEST_VERSION_CURRENT;
class DocHelper {
public static final String FIELD_1_TEXT = "field one text";
@@ -219,7 +219,7 @@
*/
public static SegmentInfo writeDoc(Directory dir, Document doc) throws IOException
{
- return writeDoc(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), Similarity.getDefault(), doc);
+ return writeDoc(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), Similarity.getDefault(), doc);
}
/**
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java Wed Feb 10 13:35:57 2010
@@ -20,8 +20,6 @@
import java.io.IOException;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -429,7 +427,7 @@
private IndexWriter newWriter(Directory dir, boolean create)
throws IOException {
- final IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), create, IndexWriter.MaxFieldLength.UNLIMITED);
+ final IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), create, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setMergePolicy(new LogDocMergePolicy(writer));
return writer;
}
@@ -503,7 +501,7 @@
public void testHangOnClose() throws IOException {
Directory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMergePolicy(new LogByteSizeMergePolicy(writer));
writer.setMaxBufferedDocs(5);
writer.setUseCompoundFile(false);
@@ -529,7 +527,7 @@
writer.close();
Directory dir2 = new MockRAMDirectory();
- writer = new IndexWriter(dir2, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir2, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy(writer);
lmp.setMinMergeMB(0.0001);
writer.setMergePolicy(lmp);
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestAtomicUpdate.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestAtomicUpdate.java?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestAtomicUpdate.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestAtomicUpdate.java Wed Feb 10 13:35:57 2010
@@ -26,7 +26,7 @@
import java.io.IOException;
public class TestAtomicUpdate extends LuceneTestCase {
- private static final Analyzer ANALYZER = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+ private static final Analyzer ANALYZER = new SimpleAnalyzer(TEST_VERSION_CURRENT);
private Random RANDOM;
public class MockIndexWriter extends IndexWriter {
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java Wed Feb 10 13:35:57 2010
@@ -45,7 +45,6 @@
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.ReaderUtil;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.util._TestUtil;
/*
@@ -218,7 +217,7 @@
hasTested29++;
}
- IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
w.optimize();
w.close();
@@ -273,7 +272,7 @@
}
public void searchIndex(String dirName, String oldName) throws IOException {
- //QueryParser parser = new QueryParser("contents", new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ //QueryParser parser = new QueryParser("contents", new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
//Query query = parser.parse("handle:1");
dirName = fullDir(dirName);
@@ -358,7 +357,7 @@
Directory dir = FSDirectory.open(new File(dirName));
// open writer
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
// add 10 docs
for(int i=0;i<10;i++) {
@@ -402,7 +401,7 @@
searcher.close();
// optimize
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
writer.optimize();
writer.close();
@@ -452,7 +451,7 @@
searcher.close();
// optimize
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
writer.optimize();
writer.close();
@@ -474,7 +473,7 @@
dirName = fullDir(dirName);
Directory dir = FSDirectory.open(new File(dirName));
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setUseCompoundFile(doCFS);
writer.setMaxBufferedDocs(10);
@@ -485,7 +484,7 @@
writer.close();
// open fresh writer so we get no prx file in the added segment
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
writer.setUseCompoundFile(doCFS);
writer.setMaxBufferedDocs(10);
addNoProxDoc(writer);
@@ -512,7 +511,7 @@
try {
Directory dir = FSDirectory.open(new File(fullDir(outputDir)));
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setRAMBufferSizeMB(16.0);
for(int i=0;i<35;i++) {
addDoc(writer, i);
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestCheckIndex.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestCheckIndex.java?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestCheckIndex.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestCheckIndex.java Wed Feb 10 13:35:57 2010
@@ -24,7 +24,6 @@
import java.util.ArrayList;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
@@ -35,7 +34,7 @@
public void testDeletedDocs() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
Document doc = new Document();
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java?rev=908496&r1=908495&r2=908496&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java Wed Feb 10 13:35:57 2010
@@ -25,13 +25,11 @@
import org.apache.lucene.document.Field;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
import java.io.IOException;
public class TestConcurrentMergeScheduler extends LuceneTestCase {
- private static final Analyzer ANALYZER = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+ private static final Analyzer ANALYZER = new SimpleAnalyzer(TEST_VERSION_CURRENT);
private static class FailOnlyOnFlush extends MockRAMDirectory.Failure {
boolean doFail;