You are viewing a plain text version of this content. The canonical link for it is here.
Posted to pylucene-commits@lucene.apache.org by va...@apache.org on 2012/07/07 17:29:38 UTC
svn commit: r1358592 [2/2] - in /lucene/pylucene/trunk: ./
java/org/apache/pylucene/analysis/ java/org/apache/pylucene/index/
java/org/apache/pylucene/queryParser/
java/org/apache/pylucene/queryParser/classic/
java/org/apache/pylucene/search/ java/org/...
Modified: lucene/pylucene/trunk/samples/LuceneInAction/lia/extsearch/sorting/DistanceSortingTest.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/samples/LuceneInAction/lia/extsearch/sorting/DistanceSortingTest.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/samples/LuceneInAction/lia/extsearch/sorting/DistanceSortingTest.py (original)
+++ lucene/pylucene/trunk/samples/LuceneInAction/lia/extsearch/sorting/DistanceSortingTest.py Sat Jul 7 15:29:35 2012
@@ -38,7 +38,7 @@ class DistanceSortingTest(TestCase):
writer.close()
- self.searcher = IndexSearcher(self.directory, True)
+ self.searcher = self.getSearcher()
self.query = TermQuery(Term("type", "restaurant"))
def addPoint(self, writer, name, type, x, y):
Modified: lucene/pylucene/trunk/samples/LuceneInAction/lia/handlingtypes/framework/FileIndexer.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/samples/LuceneInAction/lia/handlingtypes/framework/FileIndexer.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/samples/LuceneInAction/lia/handlingtypes/framework/FileIndexer.py (original)
+++ lucene/pylucene/trunk/samples/LuceneInAction/lia/handlingtypes/framework/FileIndexer.py Sat Jul 7 15:29:35 2012
@@ -68,7 +68,7 @@ class FileIndexer(object):
writer.setUseCompoundFile(False)
numIndexed = cls.indexDirectory(writer, dataDir)
- writer.optimize()
+ writer.commit()
writer.close()
return numIndexed
Modified: lucene/pylucene/trunk/samples/LuceneInAction/lia/indexing/BaseIndexingTestCase.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/samples/LuceneInAction/lia/indexing/BaseIndexingTestCase.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/samples/LuceneInAction/lia/indexing/BaseIndexingTestCase.py (original)
+++ lucene/pylucene/trunk/samples/LuceneInAction/lia/indexing/BaseIndexingTestCase.py Sat Jul 7 15:29:35 2012
@@ -63,7 +63,7 @@ class BaseIndexingTestCase(TestCase):
Field.Store.YES, Field.Index.ANALYZED))
writer.addDocument(doc)
- writer.optimize()
+ writer.commit()
writer.close()
def getAnalyzer(self):
Modified: lucene/pylucene/trunk/samples/LuceneInAction/lia/indexing/CompoundVersusMultiFileIndexTest.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/samples/LuceneInAction/lia/indexing/CompoundVersusMultiFileIndexTest.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/samples/LuceneInAction/lia/indexing/CompoundVersusMultiFileIndexTest.py (original)
+++ lucene/pylucene/trunk/samples/LuceneInAction/lia/indexing/CompoundVersusMultiFileIndexTest.py Sat Jul 7 15:29:35 2012
@@ -91,7 +91,7 @@ class CompoundVersusMultiFileIndexTest(T
Field.Store.YES, Field.Index.ANALYZED))
writer.addDocument(doc)
- writer.optimize()
+ writer.commit()
writer.close()
def loadDocuments(self, numDocs, wordsPerDoc):
Modified: lucene/pylucene/trunk/samples/LuceneInAction/lia/indexing/DocumentDeleteTest.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/samples/LuceneInAction/lia/indexing/DocumentDeleteTest.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/samples/LuceneInAction/lia/indexing/DocumentDeleteTest.py (original)
+++ lucene/pylucene/trunk/samples/LuceneInAction/lia/indexing/DocumentDeleteTest.py Sat Jul 7 15:29:35 2012
@@ -51,7 +51,7 @@ class DocumentDeleteTest(BaseIndexingTes
writer = IndexWriter(self.dir, self.getAnalyzer(), False,
IndexWriter.MaxFieldLength.UNLIMITED)
- writer.optimize()
+ writer.commit()
writer.close()
reader = IndexReader.open(self.dir, True)
Modified: lucene/pylucene/trunk/samples/LuceneInAction/lia/indexing/DocumentUpdateTest.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/samples/LuceneInAction/lia/indexing/DocumentUpdateTest.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/samples/LuceneInAction/lia/indexing/DocumentUpdateTest.py (original)
+++ lucene/pylucene/trunk/samples/LuceneInAction/lia/indexing/DocumentUpdateTest.py Sat Jul 7 15:29:35 2012
@@ -40,7 +40,7 @@ class DocumentUpdateTest(BaseIndexingTes
doc.add(Field("city", "St. Petersburg",
Field.Store.YES, Field.Index.ANALYZED))
writer.addDocument(doc)
- writer.optimize()
+ writer.commit()
writer.close()
self.assertEqual(0, self.getHitCount("city", "Amsterdam"))
@@ -57,6 +57,6 @@ class DocumentUpdateTest(BaseIndexingTes
t = Term(fieldName, searchString)
query = TermQuery(t)
hitCount = len(searcher.search(query, 50).scoreDocs)
- searcher.close()
+ del searcher
return hitCount
Modified: lucene/pylucene/trunk/samples/LuceneInAction/lia/indexing/FSversusRAMDirectoryTest.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/samples/LuceneInAction/lia/indexing/FSversusRAMDirectoryTest.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/samples/LuceneInAction/lia/indexing/FSversusRAMDirectoryTest.py (original)
+++ lucene/pylucene/trunk/samples/LuceneInAction/lia/indexing/FSversusRAMDirectoryTest.py Sat Jul 7 15:29:35 2012
@@ -87,7 +87,7 @@ class FSversusRAMDirectoryTest(TestCase)
Field.Store.YES, Field.Index.ANALYZED))
writer.addDocument(doc)
- writer.optimize()
+ writer.commit()
writer.close()
def loadDocuments(self, numDocs, wordsPerDoc):
Modified: lucene/pylucene/trunk/samples/LuceneInAction/lia/indexing/FieldLengthTest.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/samples/LuceneInAction/lia/indexing/FieldLengthTest.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/samples/LuceneInAction/lia/indexing/FieldLengthTest.py (original)
+++ lucene/pylucene/trunk/samples/LuceneInAction/lia/indexing/FieldLengthTest.py Sat Jul 7 15:29:35 2012
@@ -52,7 +52,7 @@ class FieldLengthTest(TestCase):
t = Term(fieldName, searchString)
query = TermQuery(t)
hitCount = len(searcher.search(query, 50).scoreDocs)
- searcher.close()
+ del searcher
return hitCount
@@ -74,5 +74,5 @@ class FieldLengthTest(TestCase):
Field.Store.YES, Field.Index.ANALYZED))
writer.addDocument(doc)
- writer.optimize()
+ writer.commit()
writer.close()
Modified: lucene/pylucene/trunk/samples/LuceneInAction/lia/indexing/VerboseIndexing.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/samples/LuceneInAction/lia/indexing/VerboseIndexing.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/samples/LuceneInAction/lia/indexing/VerboseIndexing.py (original)
+++ lucene/pylucene/trunk/samples/LuceneInAction/lia/indexing/VerboseIndexing.py Sat Jul 7 15:29:35 2012
@@ -40,7 +40,7 @@ class VerboseIndexing(object):
Field.Store.YES, Field.Index.UN_TOKENIZED))
writer.addDocument(doc)
- writer.optimize()
+ writer.commit()
writer.close()
main = classmethod(main)
Propchange: lucene/pylucene/trunk/samples/LuceneInAction/lia/meetlucene/
------------------------------------------------------------------------------
Merged /lucene/pylucene/branches/pylucene_3_6/samples/LuceneInAction/lia/meetlucene:r1334275-1358561
Merged /lucene/pylucene/branches/pylucene_3_0/samples/LuceneInAction/lia/meetlucene:r950836
Merged /lucene/pylucene/branches/branch_3x/samples/LuceneInAction/lia/meetlucene:r943951-944952,944955-945799,945801-984139,984141-984145,984147-1358562
Modified: lucene/pylucene/trunk/samples/LuceneInAction/lia/meetlucene/Indexer.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/samples/LuceneInAction/lia/meetlucene/Indexer.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/samples/LuceneInAction/lia/meetlucene/Indexer.py (original)
+++ lucene/pylucene/trunk/samples/LuceneInAction/lia/meetlucene/Indexer.py Sat Jul 7 15:29:35 2012
@@ -51,7 +51,7 @@ class Indexer(object):
cls.indexDirectory(writer, dataDir)
numIndexed = writer.numDocs()
- writer.optimize()
+ writer.commit()
writer.close()
dir.close()
Modified: lucene/pylucene/trunk/samples/LuceneInAction/lia/searching/BasicSearchingTest.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/samples/LuceneInAction/lia/searching/BasicSearchingTest.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/samples/LuceneInAction/lia/searching/BasicSearchingTest.py (original)
+++ lucene/pylucene/trunk/samples/LuceneInAction/lia/searching/BasicSearchingTest.py Sat Jul 7 15:29:35 2012
@@ -23,7 +23,7 @@ class BasicSearchingTest(LiaTestCase):
def testTerm(self):
- searcher = IndexSearcher(self.directory, True)
+ searcher = self.getSearcher()
t = Term("subject", "ant")
query = TermQuery(t)
scoreDocs = searcher.search(query, 50).scoreDocs
@@ -33,11 +33,11 @@ class BasicSearchingTest(LiaTestCase):
scoreDocs = searcher.search(TermQuery(t), 50).scoreDocs
self.assertEqual(2, len(scoreDocs))
- searcher.close()
+ del searcher
def testKeyword(self):
- searcher = IndexSearcher(self.directory, True)
+ searcher = self.getSearcher()
t = Term("isbn", "1930110995")
query = TermQuery(t)
scoreDocs = searcher.search(query, 50).scoreDocs
@@ -45,16 +45,16 @@ class BasicSearchingTest(LiaTestCase):
def testQueryParser(self):
- searcher = IndexSearcher(self.directory, True)
+ searcher = self.getSearcher()
- query = QueryParser(Version.LUCENE_CURRENT, "contents",
- SimpleAnalyzer()).parse("+JUNIT +ANT -MOCK")
+ query = QueryParser(self.TEST_VERSION, "contents",
+ SimpleAnalyzer(self.TEST_VERSION)).parse("+JUNIT +ANT -MOCK")
scoreDocs = searcher.search(query, 50).scoreDocs
self.assertEqual(1, len(scoreDocs))
d = searcher.doc(scoreDocs[0].doc)
self.assertEqual("Java Development with Ant", d.get("title"))
- query = QueryParser(Version.LUCENE_CURRENT, "contents",
- SimpleAnalyzer()).parse("mock OR junit")
+ query = QueryParser(self.TEST_VERSION, "contents",
+ SimpleAnalyzer(self.TEST_VERSION)).parse("mock OR junit")
scoreDocs = searcher.search(query, 50).scoreDocs
self.assertEqual(2, len(scoreDocs), "JDwA and JIA")
Modified: lucene/pylucene/trunk/samples/LuceneInAction/lia/searching/BooleanQueryTest.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/samples/LuceneInAction/lia/searching/BooleanQueryTest.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/samples/LuceneInAction/lia/searching/BooleanQueryTest.py (original)
+++ lucene/pylucene/trunk/samples/LuceneInAction/lia/searching/BooleanQueryTest.py Sat Jul 7 15:29:35 2012
@@ -33,7 +33,7 @@ class BooleanQueryTest(LiaTestCase):
searchingBooks2004.add(searchingBooks, BooleanClause.Occur.MUST)
searchingBooks2004.add(books2004, BooleanClause.Occur.MUST)
- searcher = IndexSearcher(self.directory, True)
+ searcher = self.getSearcher()
scoreDocs = searcher.search(searchingBooks2004, 50).scoreDocs
self.assertHitsIncludeTitle(searcher, scoreDocs, "Lucene in Action")
@@ -49,7 +49,7 @@ class BooleanQueryTest(LiaTestCase):
enlightenmentBooks.add(methodologyBooks, BooleanClause.Occur.SHOULD)
enlightenmentBooks.add(easternPhilosophyBooks, BooleanClause.Occur.SHOULD)
- searcher = IndexSearcher(self.directory, True)
+ searcher = self.getSearcher()
scoreDocs = searcher.search(enlightenmentBooks, 50).scoreDocs
print "or =", enlightenmentBooks
Modified: lucene/pylucene/trunk/samples/LuceneInAction/lia/searching/NumericRangeQueryTest.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/samples/LuceneInAction/lia/searching/NumericRangeQueryTest.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/samples/LuceneInAction/lia/searching/NumericRangeQueryTest.py (original)
+++ lucene/pylucene/trunk/samples/LuceneInAction/lia/searching/NumericRangeQueryTest.py Sat Jul 7 15:29:35 2012
@@ -22,7 +22,7 @@ class NumericRangeQueryTest(LiaTestCase)
def testInclusive(self):
- searcher = IndexSearcher(self.directory, True)
+ searcher = self.getSearcher()
# pub date of TTC was October 1988
query = NumericRangeQuery.newIntRange("pubmonth",
Integer(198805),
@@ -31,11 +31,11 @@ class NumericRangeQueryTest(LiaTestCase)
topDocs = searcher.search(query, 100)
self.assertEqual(1, topDocs.totalHits)
- searcher.close()
+ del searcher
def testExclusive(self):
- searcher = IndexSearcher(self.directory, True)
+ searcher = self.getSearcher()
# pub date of TTC was October 1988
query = NumericRangeQuery.newIntRange("pubmonth",
Integer(198805),
@@ -43,4 +43,4 @@ class NumericRangeQueryTest(LiaTestCase)
False, False)
topDocs = searcher.search(query, 100)
self.assertEqual(0, topDocs.totalHits)
- searcher.close()
+ del searcher
Modified: lucene/pylucene/trunk/samples/LuceneInAction/lia/searching/PrefixQueryTest.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/samples/LuceneInAction/lia/searching/PrefixQueryTest.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/samples/LuceneInAction/lia/searching/PrefixQueryTest.py (original)
+++ lucene/pylucene/trunk/samples/LuceneInAction/lia/searching/PrefixQueryTest.py Sat Jul 7 15:29:35 2012
@@ -21,7 +21,7 @@ class PrefixQueryTest(LiaTestCase):
def testPrefix(self):
- searcher = IndexSearcher(self.directory, True)
+ searcher = self.getSearcher()
# search for programming books, including subcategories
term = Term("category", "/technology/computers/programming")
Modified: lucene/pylucene/trunk/samples/LuceneInAction/lia/searching/QueryParserTest.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/samples/LuceneInAction/lia/searching/QueryParserTest.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/samples/LuceneInAction/lia/searching/QueryParserTest.py (original)
+++ lucene/pylucene/trunk/samples/LuceneInAction/lia/searching/QueryParserTest.py Sat Jul 7 15:29:35 2012
@@ -26,7 +26,7 @@ class QueryParserTest(LiaTestCase):
super(QueryParserTest, self).setUp()
self.analyzer = WhitespaceAnalyzer()
- self.searcher = IndexSearcher(self.directory, True)
+ self.searcher = self.getSearcher()
def testToString(self):
Modified: lucene/pylucene/trunk/samples/LuceneInAction/lia/searching/ScoreTest.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/samples/LuceneInAction/lia/searching/ScoreTest.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/samples/LuceneInAction/lia/searching/ScoreTest.py (original)
+++ lucene/pylucene/trunk/samples/LuceneInAction/lia/searching/ScoreTest.py Sat Jul 7 15:29:35 2012
@@ -69,7 +69,7 @@ class ScoreTest(LiaTestCase):
self.assertEqual(1, len(scoreDocs))
self.assertEqual(scoreDocs[0].score, 1.0)
- searcher.close()
+ del searcher
def indexSingleFieldDocs(self, fields):
@@ -81,7 +81,7 @@ class ScoreTest(LiaTestCase):
doc.add(field)
writer.addDocument(doc)
- writer.optimize()
+ writer.commit()
writer.close()
def testWildcard(self):
Modified: lucene/pylucene/trunk/samples/LuceneInAction/lia/searching/TermRangeQueryTest.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/samples/LuceneInAction/lia/searching/TermRangeQueryTest.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/samples/LuceneInAction/lia/searching/TermRangeQueryTest.py (original)
+++ lucene/pylucene/trunk/samples/LuceneInAction/lia/searching/TermRangeQueryTest.py Sat Jul 7 15:29:35 2012
@@ -21,9 +21,9 @@ class TermRangeQueryTest(LiaTestCase):
def testTermRangeQuery(self):
- searcher = IndexSearcher(self.directory, True)
+ searcher = self.getSearcher()
query = TermRangeQuery("title2", "d", "j", True, True)
topDocs = searcher.search(query, 100)
self.assertEqual(3, topDocs.totalHits)
- searcher.close()
+ del searcher
Modified: lucene/pylucene/trunk/samples/LuceneInAction/lia/tools/BerkeleyDbIndexer.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/samples/LuceneInAction/lia/tools/BerkeleyDbIndexer.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/samples/LuceneInAction/lia/tools/BerkeleyDbIndexer.py (original)
+++ lucene/pylucene/trunk/samples/LuceneInAction/lia/tools/BerkeleyDbIndexer.py Sat Jul 7 15:29:35 2012
@@ -86,7 +86,7 @@ class BerkeleyDbIndexer(object):
Field.Store.YES, Field.Index.ANALYZED))
writer.addDocument(doc)
- writer.optimize()
+ writer.commit()
writer.close()
except:
if txn is not None:
Modified: lucene/pylucene/trunk/samples/LuceneInAction/lia/tools/BerkeleyDbSearcher.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/samples/LuceneInAction/lia/tools/BerkeleyDbSearcher.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/samples/LuceneInAction/lia/tools/BerkeleyDbSearcher.py (original)
+++ lucene/pylucene/trunk/samples/LuceneInAction/lia/tools/BerkeleyDbSearcher.py Sat Jul 7 15:29:35 2012
@@ -68,11 +68,11 @@ class BerkeleyDbSearcher(object):
try:
txn = env.txn_begin(None)
directory = DbDirectory(txn, index, blocks, 0)
- searcher = IndexSearcher(directory, True)
+ searcher = self.getSearcher()
topDocs = searcher.search(TermQuery(Term("contents", "fox")), 50)
print topDocs.totalHits, "document(s) found"
- searcher.close()
+ del searcher
except:
if txn is not None:
txn.abort()
Modified: lucene/pylucene/trunk/samples/LuceneInAction/lia/tools/HighlightTest.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/samples/LuceneInAction/lia/tools/HighlightTest.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/samples/LuceneInAction/lia/tools/HighlightTest.py (original)
+++ lucene/pylucene/trunk/samples/LuceneInAction/lia/tools/HighlightTest.py Sat Jul 7 15:29:35 2012
@@ -36,7 +36,7 @@ class HighlightTest(LiaTestCase):
def testHits(self):
- searcher = IndexSearcher(self.directory, True)
+ searcher = self.getSearcher()
query = TermQuery(Term("title", "action"))
scoreDocs = searcher.search(query, 50).scoreDocs
Modified: lucene/pylucene/trunk/samples/LuceneInAction/lia/tools/T9er.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/samples/LuceneInAction/lia/tools/T9er.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/samples/LuceneInAction/lia/tools/T9er.py (original)
+++ lucene/pylucene/trunk/samples/LuceneInAction/lia/tools/T9er.py Sat Jul 7 15:29:35 2012
@@ -64,7 +64,7 @@ class T9er(object):
if id % 100 == 0:
print "Document", id
- writer.optimize()
+ writer.commit()
writer.close()
reader.close()
Modified: lucene/pylucene/trunk/samples/SearchFiles.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/samples/SearchFiles.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/samples/SearchFiles.py (original)
+++ lucene/pylucene/trunk/samples/SearchFiles.py Sat Jul 7 15:29:35 2012
@@ -38,7 +38,7 @@ if __name__ == '__main__':
initVM()
print 'lucene', VERSION
directory = SimpleFSDirectory(File(STORE_DIR))
- searcher = IndexSearcher(directory, True)
+ searcher = self.getSearcher()
analyzer = StandardAnalyzer(Version.LUCENE_CURRENT)
run(searcher, analyzer)
- searcher.close()
+ del searcher
Modified: lucene/pylucene/trunk/samples/TermPositionVector.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/samples/TermPositionVector.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/samples/TermPositionVector.py (original)
+++ lucene/pylucene/trunk/samples/TermPositionVector.py Sat Jul 7 15:29:35 2012
@@ -16,7 +16,7 @@ for t in ts:
Field.Store.YES, Field.Index.ANALYZED,
Field.TermVector.WITH_POSITIONS_OFFSETS))
iwriter.addDocument(doc)
-iwriter.optimize()
+iwriter.commit()
iwriter.close()
ireader = IndexReader.open(directory, True)
Modified: lucene/pylucene/trunk/samples/manindex.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/samples/manindex.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/samples/manindex.py (original)
+++ lucene/pylucene/trunk/samples/manindex.py Sat Jul 7 15:29:35 2012
@@ -96,5 +96,5 @@ if __name__ == '__main__':
path = os.path.join(dir, name)
if os.path.isdir(path):
indexDirectory(path)
- writer.optimize()
+ writer.commit()
writer.close()
Modified: lucene/pylucene/trunk/samples/mansearch.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/samples/mansearch.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/samples/mansearch.py (original)
+++ lucene/pylucene/trunk/samples/mansearch.py Sat Jul 7 15:29:35 2012
@@ -65,7 +65,8 @@ template = CustomTemplate(format)
fsDir = SimpleFSDirectory(File(indexDir))
searcher = IndexSearcher(fsDir, True)
-parser = QueryParser("keywords", StandardAnalyzer(Version.LUCENE_CURRENT))
+analyzer = StandardAnalyzer(Version.LUCENE_CURRENT)
+parser = QueryParser(Version.LUCENE_CURRENT, "keywords", analyzer)
parser.setDefaultOperator(QueryParser.Operator.AND)
query = parser.parse(' '.join(args))
start = datetime.now()
Modified: lucene/pylucene/trunk/test/BaseTestRangeFilter.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test/BaseTestRangeFilter.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/test/BaseTestRangeFilter.py (original)
+++ lucene/pylucene/trunk/test/BaseTestRangeFilter.py Sat Jul 7 15:29:35 2012
@@ -88,7 +88,7 @@ class BaseTestRangeFilter(TestCase):
Field.Index.NOT_ANALYZED));
writer.addDocument(doc)
- writer.optimize()
+ writer.commit()
writer.close()
def testPad(self):
Modified: lucene/pylucene/trunk/test/BaseTokenStreamTestCase.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test/BaseTokenStreamTestCase.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/test/BaseTokenStreamTestCase.py (original)
+++ lucene/pylucene/trunk/test/BaseTokenStreamTestCase.py Sat Jul 7 15:29:35 2012
@@ -17,35 +17,68 @@ from lucene import *
class BaseTokenStreamTestCase(TestCase):
"""
- Base class for all Lucene unit tests that use TokenStreams.
+ some helpers to test Analyzers and TokenStreams
"""
+ class CheckClearAttributesAttributeImpl(PythonAttributeImpl):
+
+ def __init__(_self):
+ super(PythonAttributeImpl, _self).__init__()
+ _self.clearCalled = False
+
+ def getAndResetClearCalled(_self):
+ try:
+ return _self.clearCalled
+ finally:
+ _self.clearCalled = False
+
+ def clear(_self):
+ _self.clearCalled = True
+
+ def equals(_self, other):
+ return (
+ CheckClearAttributesAttributeImpl.instance_(other) and
+ CheckClearAttributesAttributeImpl.cast_(other).clearCalled ==
+ _self.clearCalled)
+
+ def hashCode(_self):
+ return 76137213 ^ Boolean.valueOf(_self.clearCalled).hashCode()
+
+ def copyTo(_self, target):
+ CheckClearAttributesAttributeImpl.cast_(target).clear()
+
+
def _assertTokenStreamContents(self, ts, output,
startOffsets=None, endOffsets=None,
- types=None, posIncrements=None):
+ types=None, posIncrements=None,
+ finalOffset=None):
+
+ #checkClearAtt = ts.addAttribute(PythonAttribute.class_);
self.assert_(output is not None)
self.assert_(ts.hasAttribute(CharTermAttribute.class_),
- "has TermAttribute")
+ "has no CharTermAttribute")
termAtt = ts.getAttribute(CharTermAttribute.class_)
offsetAtt = None
- if startOffsets is not None or endOffsets is not None:
+ if (startOffsets is not None or
+ endOffsets is not None or
+ finalOffset is not None):
self.assert_(ts.hasAttribute(OffsetAttribute.class_),
- "has OffsetAttribute")
+ "has no OffsetAttribute")
offsetAtt = ts.getAttribute(OffsetAttribute.class_)
typeAtt = None
if types is not None:
self.assert_(ts.hasAttribute(TypeAttribute.class_),
- "has TypeAttribute")
+ "has no TypeAttribute")
typeAtt = ts.getAttribute(TypeAttribute.class_)
posIncrAtt = None
if posIncrements is not None:
self.assert_(ts.hasAttribute(PositionIncrementAttribute.class_),
- "has PositionIncrementAttribute")
+ "has no PositionIncrementAttribute")
posIncrAtt = ts.getAttribute(PositionIncrementAttribute.class_)
ts.reset()
@@ -53,7 +86,7 @@ class BaseTokenStreamTestCase(TestCase):
# extra safety to enforce, that the state is not preserved and
# also assign bogus values
ts.clearAttributes()
- termAtt.append("bogusTerm")
+ termAtt.setEmpty().append("bogusTerm")
if offsetAtt is not None:
offsetAtt.setOffset(14584724, 24683243)
if typeAtt is not None:
Modified: lucene/pylucene/trunk/test/test_Analyzers.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test/test_Analyzers.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/test/test_Analyzers.py (original)
+++ lucene/pylucene/trunk/test/test_Analyzers.py Sat Jul 7 15:29:35 2012
@@ -79,7 +79,7 @@ class AnalyzersTestCase(BaseTokenStreamT
b += 1
if not ts.incrementToken():
break
- self.assertEqual(b, payloadAtt.getPayload().toByteArray()[0])
+ self.assertEqual(b, payloadAtt.getPayload().bytes[0])
# Make sure old style next() calls result in a new copy of payloads
def testPayloadCopy(self):
@@ -102,7 +102,7 @@ class PayloadSetter(PythonTokenFilter):
self.input = input
self.payloadAtt = self.addAttribute(PayloadAttribute.class_)
self.data = JArray('byte')(1)
- self.p = Payload(self.data, 0, 1)
+ self.p = BytesRef(self.data, 0, 1)
def incrementToken(self):
Modified: lucene/pylucene/trunk/test/test_Binary.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test/test_Binary.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/test/test_Binary.py (original)
+++ lucene/pylucene/trunk/test/test_Binary.py Sat Jul 7 15:29:35 2012
@@ -15,15 +15,15 @@
# test PyLucene binary field
from unittest import TestCase, main
-from lucene import Field, JArray
+from lucene import StoredField, JArray
class BinaryTestCase(TestCase):
def binary(self, b):
c = JArray('byte')(b)
- field = Field("bin", c)
- v = field.binaryValue
+ field = StoredField("bin", c)
+ v = field.binaryValue().bytes
assert c == v and b == [a for a in v]
def testBinary(self):
Modified: lucene/pylucene/trunk/test/test_BinaryDocument.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test/test_BinaryDocument.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/test/test_BinaryDocument.py (original)
+++ lucene/pylucene/trunk/test/test_BinaryDocument.py Sat Jul 7 15:29:35 2012
@@ -12,11 +12,12 @@
# limitations under the License.
# ====================================================================
+from pylucene_testcase import PyLuceneTestCase
from unittest import TestCase, main
from lucene import *
-class TestBinaryDocument(TestCase):
+class TestBinaryDocument(PyLuceneTestCase):
binaryValStored = "this text will be stored as a byte array in the index"
binaryValCompressed = "this text will be also stored and compressed as a byte array in the index"
@@ -24,36 +25,41 @@ class TestBinaryDocument(TestCase):
def testBinaryFieldInIndex(self):
bytes = JArray('byte')(self.binaryValStored)
- binaryFldStored = Field("binaryStored", bytes)
- stringFldStored = Field("stringStored", self.binaryValStored,
- Field.Store.YES, Field.Index.NO,
- Field.TermVector.NO)
-
- try:
- # binary fields with store off are not allowed
- Field("fail", bytes, Field.Store.NO)
- self.fail()
- except JavaError, e:
- self.assertEqual(e.getJavaException().getClass().getName(),
- 'java.lang.IllegalArgumentException')
+ binaryFldStored = StoredField("binaryStored", bytes)
+ ft = FieldType()
+ ft.setStored(True)
+ ft.setIndexed(False)
+ ft.setStoreTermVectors(False)
+ stringFldStored = Field("stringStored", self.binaryValStored, ft)
+
+
+ # couldn't find any combination with lucene4.0 where it would raise errors
+ #try:
+ # # binary fields with store off are not allowed
+ # Field("fail", bytes, Field.Store.NO)
+ # self.fail()
+ #except JavaError, e:
+ # self.assertEqual(e.getJavaException().getClass().getName(),
+ # 'java.lang.IllegalArgumentException')
+
doc = Document()
doc.add(binaryFldStored)
doc.add(stringFldStored)
+
+
# test for field count
self.assertEqual(2, doc.fields.size())
# add the doc to a ram index
- dir = RAMDirectory()
- writer = IndexWriter(dir, StandardAnalyzer(Version.LUCENE_CURRENT),
- True, IndexWriter.MaxFieldLength.LIMITED)
+ writer = self.getWriter(analyzer=StandardAnalyzer(Version.LUCENE_CURRENT))
writer.addDocument(doc)
writer.close()
# open a reader and fetch the document
- reader = IndexReader.open(dir, False)
- docFromReader = reader.document(0)
+ reader = self.getReader()
+ docFromReader = reader.document(0) #segfault
self.assert_(docFromReader is not None)
# fetch the binary stored field and compare it's content with the
Modified: lucene/pylucene/trunk/test/test_DocBoost.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test/test_DocBoost.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/test/test_DocBoost.py (original)
+++ lucene/pylucene/trunk/test/test_DocBoost.py Sat Jul 7 15:29:35 2012
@@ -47,7 +47,7 @@ class DocBoostTestCase(TestCase):
writer.addDocument(d2)
writer.addDocument(d3)
writer.addDocument(d4)
- writer.optimize()
+ writer.commit()
writer.close()
scores = [0.0] * 4
Modified: lucene/pylucene/trunk/test/test_FilteredQuery.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test/test_FilteredQuery.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/test/test_FilteredQuery.py (original)
+++ lucene/pylucene/trunk/test/test_FilteredQuery.py Sat Jul 7 15:29:35 2012
@@ -59,10 +59,10 @@ class FilteredQueryTestCase(TestCase):
writer.addDocument(doc)
- writer.optimize()
+ writer.commit()
writer.close()
- self.searcher = IndexSearcher(self.directory, True)
+ self.searcher = self.getSearcher()
self.query = TermQuery(Term("field", "three"))
class filter(PythonFilter):
@@ -76,7 +76,7 @@ class FilteredQueryTestCase(TestCase):
def tearDown(self):
- self.searcher.close()
+ del self.searcher
self.directory.close()
def testFilteredQuery(self):
Modified: lucene/pylucene/trunk/test/test_FuzzyQuery.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test/test_FuzzyQuery.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/test/test_FuzzyQuery.py (original)
+++ lucene/pylucene/trunk/test/test_FuzzyQuery.py Sat Jul 7 15:29:35 2012
@@ -40,10 +40,10 @@ class FuzzyQueryTestCase(TestCase):
self._addDoc("abbbb", writer)
self._addDoc("bbbbb", writer)
self._addDoc("ddddd", writer)
- writer.optimize()
+ writer.commit()
writer.close()
- searcher = IndexSearcher(directory, True)
+ searcher = self.getSearcher()
query = FuzzyQuery(Term("field", "aaaaa"))
topDocs = searcher.search(query, 50)
@@ -85,7 +85,7 @@ class FuzzyQueryTestCase(TestCase):
topDocs = searcher.search(query, 50)
self.assertEqual(0, topDocs.totalHits)
- searcher.close()
+ del searcher
directory.close()
def testDefaultFuzzinessLong(self):
@@ -95,9 +95,9 @@ class FuzzyQueryTestCase(TestCase):
IndexWriter.MaxFieldLength.LIMITED)
self._addDoc("aaaaaaa", writer)
self._addDoc("segment", writer)
- writer.optimize()
+ writer.commit()
writer.close()
- searcher = IndexSearcher(directory, True)
+ searcher = self.getSearcher()
# not similar enough:
query = FuzzyQuery(Term("field", "xxxxx"))
@@ -124,7 +124,7 @@ class FuzzyQueryTestCase(TestCase):
topDocs = searcher.search(query, 50)
self.assertEqual(1, topDocs.totalHits)
- searcher.close()
+ del searcher
directory.close()
Modified: lucene/pylucene/trunk/test/test_Highlighter.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test/test_Highlighter.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/test/test_Highlighter.py (original)
+++ lucene/pylucene/trunk/test/test_Highlighter.py Sat Jul 7 15:29:35 2012
@@ -122,7 +122,7 @@ class HighlighterTestCase(TestCase):
for text in self.texts:
self.addDoc(writer, text)
- writer.optimize()
+ writer.commit()
writer.close()
self.reader = IndexReader.open(self.ramDir, True)
self.numHighlights = 0;
Modified: lucene/pylucene/trunk/test/test_Not.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test/test_Not.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/test/test_Not.py (original)
+++ lucene/pylucene/trunk/test/test_Not.py Sat Jul 7 15:29:35 2012
@@ -31,7 +31,7 @@ class NotTestCase(TestCase):
d1.add(Field("field", "a b", Field.Store.YES, Field.Index.ANALYZED))
writer.addDocument(d1)
- writer.optimize()
+ writer.commit()
writer.close()
searcher = IndexSearcher(store, True)
Modified: lucene/pylucene/trunk/test/test_PhraseQuery.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test/test_PhraseQuery.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/test/test_PhraseQuery.py (original)
+++ lucene/pylucene/trunk/test/test_PhraseQuery.py Sat Jul 7 15:29:35 2012
@@ -24,24 +24,28 @@ class PhraseQueryTestCase(TestCase):
def setUp(self):
self.directory = RAMDirectory()
- writer = IndexWriter(self.directory, WhitespaceAnalyzer(), True,
- IndexWriter.MaxFieldLength.LIMITED)
+ writer = self.getWriter()
doc = Document()
doc.add(Field("field", "one two three four five",
- Field.Store.YES, Field.Index.ANALYZED))
+ TextField.TYPE_STORED))
writer.addDocument(doc)
- writer.optimize()
writer.close()
-
- self.searcher = IndexSearcher(self.directory, True)
+
+ self.reader = DirectoryReader.open(writer.getDirectory())
+ self.searcher = IndexSearcher(self.reader)
self.query = PhraseQuery()
def tearDown(self):
- self.searcher.close()
+ self.reader.close()
self.directory.close()
+
+ def getWriter(self, directory=None, analyzer=None):
+ return IndexWriter(directory or self.directory, IndexWriterConfig(Version.LUCENE_CURRENT,
+ LimitTokenCountAnalyzer(analyzer or WhitespaceAnalyzer(Version.LUCENE_CURRENT), 10000))
+ .setOpenMode(IndexWriterConfig.OpenMode.CREATE))
def testNotCloseEnough(self):
@@ -140,16 +144,16 @@ class PhraseQueryTestCase(TestCase):
def testPhraseQueryWithStopAnalyzer(self):
directory = RAMDirectory()
- stopAnalyzer = StopAnalyzer(Version.LUCENE_24)
- writer = IndexWriter(directory, stopAnalyzer, True,
- IndexWriter.MaxFieldLength.LIMITED)
+ stopAnalyzer = StopAnalyzer(Version.LUCENE_CURRENT)
+ writer = self.getWriter(directory, stopAnalyzer)
doc = Document()
doc.add(Field("field", "the stop words are here",
- Field.Store.YES, Field.Index.ANALYZED))
+ TextField.TYPE_STORED))
writer.addDocument(doc)
writer.close()
- searcher = IndexSearcher(directory, True)
+ reader = DirectoryReader.open(writer.getDirectory())
+ searcher = IndexSearcher(reader)
# valid exact phrase query
query = PhraseQuery()
@@ -165,33 +169,28 @@ class PhraseQueryTestCase(TestCase):
topDocs = searcher.search(query, 50)
self.assertEqual(1, topDocs.totalHits)
- searcher.close()
def testPhraseQueryInConjunctionScorer(self):
directory = RAMDirectory()
- writer = IndexWriter(directory, WhitespaceAnalyzer(), True,
- IndexWriter.MaxFieldLength.LIMITED)
+ writer = self.getWriter()
doc = Document()
doc.add(Field("source", "marketing info",
- Field.Store.YES, Field.Index.ANALYZED,
- Field.TermVector.YES))
+ TextField.TYPE_STORED))
writer.addDocument(doc)
doc = Document()
doc.add(Field("contents", "foobar",
- Field.Store.YES, Field.Index.ANALYZED,
- Field.TermVector.YES))
+ TextField.TYPE_STORED))
doc.add(Field("source", "marketing info",
- Field.Store.YES, Field.Index.ANALYZED,
- Field.TermVector.YES))
+ TextField.TYPE_STORED))
writer.addDocument(doc)
- writer.optimize()
writer.close()
-
- searcher = IndexSearcher(directory, True)
+
+ reader = DirectoryReader.open(writer.getDirectory())
+ searcher = IndexSearcher(reader)
phraseQuery = PhraseQuery()
phraseQuery.add(Term("source", "marketing"))
@@ -206,32 +205,29 @@ class PhraseQueryTestCase(TestCase):
topDocs = searcher.search(booleanQuery, 50)
self.assertEqual(1, topDocs.totalHits)
- searcher.close()
+ reader.close()
- writer = IndexWriter(directory, WhitespaceAnalyzer(), True,
- IndexWriter.MaxFieldLength.LIMITED)
+ writer = self.getWriter(directory)
+
doc = Document()
doc.add(Field("contents", "map entry woo",
- Field.Store.YES, Field.Index.ANALYZED,
- Field.TermVector.YES))
+ TextField.TYPE_STORED))
writer.addDocument(doc)
doc = Document()
doc.add(Field("contents", "woo map entry",
- Field.Store.YES, Field.Index.ANALYZED,
- Field.TermVector.YES))
+ TextField.TYPE_STORED))
writer.addDocument(doc)
doc = Document()
doc.add(Field("contents", "map foobarword entry woo",
- Field.Store.YES, Field.Index.ANALYZED,
- Field.TermVector.YES))
+ TextField.TYPE_STORED))
writer.addDocument(doc)
- writer.optimize()
writer.close()
-
- searcher = IndexSearcher(directory, True)
+
+ reader = DirectoryReader.open(writer.getDirectory())
+ searcher = IndexSearcher(reader)
termQuery = TermQuery(Term("contents", "woo"))
phraseQuery = PhraseQuery()
@@ -255,7 +251,6 @@ class PhraseQueryTestCase(TestCase):
topDocs = searcher.search(booleanQuery, 50)
self.assertEqual(2, topDocs.totalHits)
- searcher.close()
directory.close()
Modified: lucene/pylucene/trunk/test/test_PositionIncrement.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test/test_PositionIncrement.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/test/test_PositionIncrement.py (original)
+++ lucene/pylucene/trunk/test/test_PositionIncrement.py Sat Jul 7 15:29:35 2012
@@ -60,7 +60,7 @@ class PositionIncrementTestCase(TestCase
d.add(Field("field", "bogus",
Field.Store.YES, Field.Index.ANALYZED))
writer.addDocument(d)
- writer.optimize()
+ writer.commit()
writer.close()
searcher = IndexSearcher(store, True)
Modified: lucene/pylucene/trunk/test/test_PrefixFilter.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test/test_PrefixFilter.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/test/test_PrefixFilter.py (original)
+++ lucene/pylucene/trunk/test/test_PrefixFilter.py Sat Jul 7 15:29:35 2012
@@ -44,7 +44,7 @@ class PrefixFilterTestCase(TestCase):
# PrefixFilter combined with ConstantScoreQuery
filter = PrefixFilter(Term("category", "/Computers"))
query = ConstantScoreQuery(filter)
- searcher = IndexSearcher(directory, True)
+ searcher = self.getSearcher()
topDocs = searcher.search(query, 50)
self.assertEqual(4, topDocs.totalHits,
"All documents in /Computers category and below")
Modified: lucene/pylucene/trunk/test/test_PrefixQuery.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test/test_PrefixQuery.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/test/test_PrefixQuery.py (original)
+++ lucene/pylucene/trunk/test/test_PrefixQuery.py Sat Jul 7 15:29:35 2012
@@ -37,7 +37,7 @@ class PrefixQueryTestCase(TestCase):
writer.close()
query = PrefixQuery(Term("category", "/Computers"))
- searcher = IndexSearcher(directory, True)
+ searcher = self.getSearcher()
topDocs = searcher.search(query, 50)
self.assertEqual(3, topDocs.totalHits,
"All documents in /Computers category and below")
Modified: lucene/pylucene/trunk/test/test_PyLucene.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test/test_PyLucene.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/test/test_PyLucene.py (original)
+++ lucene/pylucene/trunk/test/test_PyLucene.py Sat Jul 7 15:29:35 2012
@@ -258,7 +258,7 @@ class Test_PyLuceneBase(object):
finally:
self.closeStore(store, reader)
- def test_getFieldNames(self):
+ def test_getFieldInfos(self):
self.test_indexDocument()
@@ -266,20 +266,18 @@ class Test_PyLuceneBase(object):
reader = None
try:
reader = IndexReader.open(store, True)
- fieldNames = reader.getFieldNames(IndexReader.FieldOption.ALL)
- for fieldName in fieldNames:
- self.assert_(fieldName in ['owner', 'search_name', 'meta_words',
- 'docid', 'title'])
+ fieldInfos = ReaderUtil.getMergedFieldInfos(reader)
+ for fieldInfo in fieldInfos.iterator():
+ self.assert_(fieldInfo.name in ['owner', 'search_name',
+ 'meta_words', 'docid', 'title'])
- fieldNames = reader.getFieldNames(IndexReader.FieldOption.INDEXED)
- for fieldName in fieldNames:
- self.assert_(fieldName in ['owner', 'meta_words',
- 'docid', 'title'])
-
- fieldNames = reader.getFieldNames(IndexReader.FieldOption.INDEXED_NO_TERMVECTOR)
- for fieldName in fieldNames:
- self.assert_(fieldName in ['owner', 'meta_words',
- 'docid', 'title'])
+ if fieldInfo.isIndexed:
+ self.assert_(fieldInfo.name in ['owner', 'meta_words',
+ 'docid', 'title'])
+
+ if fieldInfo.isIndexed and not fieldInfo.storeTermVector:
+ self.assert_(fieldInfo.name in ['owner', 'meta_words',
+ 'docid', 'title'])
finally:
store = self.closeStore(store, reader)
Modified: lucene/pylucene/trunk/test/test_PyLuceneThread.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test/test_PyLuceneThread.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/test/test_PyLuceneThread.py (original)
+++ lucene/pylucene/trunk/test/test_PyLuceneThread.py Sat Jul 7 15:29:35 2012
@@ -48,7 +48,7 @@ class PyLuceneThreadTestCase(TestCase):
writer.addDocument(doc2)
writer.addDocument(doc3)
writer.addDocument(doc4)
- writer.optimize()
+ writer.commit()
writer.close()
self.testData = [('one',2), ('two',1), ('three', 1), ('five', 0)] * 500
@@ -99,7 +99,7 @@ class PyLuceneThreadTestCase(TestCase):
getVMEnv().attachCurrentThread()
time.sleep(0.5)
- searcher = IndexSearcher(self.directory, True)
+ searcher = self.getSearcher()
try:
self.query = PhraseQuery()
for word, count in self.testData[0:runCount]:
@@ -111,7 +111,7 @@ class PyLuceneThreadTestCase(TestCase):
self.totalQueries += 1
self.lock.release()
finally:
- searcher.close()
+ del searcher
if __name__ == "__main__":
Modified: lucene/pylucene/trunk/test/test_RegexQuery.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test/test_RegexQuery.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/test/test_RegexQuery.py (original)
+++ lucene/pylucene/trunk/test/test_RegexQuery.py Sat Jul 7 15:29:35 2012
@@ -14,28 +14,25 @@
from unittest import TestCase, main
from lucene import *
+from pylucene_testcase import PyLuceneTestCase
-
-class TestRegexQuery(TestCase):
+class TestRegexQuery(PyLuceneTestCase):
FN = "field"
def setUp(self):
-
- directory = RAMDirectory()
-
- writer = IndexWriter(directory, SimpleAnalyzer(), True,
- IndexWriter.MaxFieldLength.LIMITED)
+ PyLuceneTestCase.setUp(self)
+ writer = self.getWriter(analyzer=SimpleAnalyzer(self.TEST_VERSION))
doc = Document()
- doc.add(Field(self.FN, "the quick brown fox jumps over the lazy dog", Field.Store.NO, Field.Index.ANALYZED))
+ doc.add(Field(self.FN, "the quick brown fox jumps over the lazy dog", TextField.TYPE_NOT_STORED))
writer.addDocument(doc)
- writer.optimize()
+ writer.commit()
writer.close()
- self.searcher = IndexSearcher(directory, True)
+ self.searcher = self.getSearcher()
def tearDown(self):
- self.searcher.close()
+ del self.searcher
def newTerm(self, value):
@@ -49,8 +46,8 @@ class TestRegexQuery(TestCase):
def spanRegexQueryNrHits(self, regex1, regex2, slop, ordered):
- srq1 = SpanRegexQuery(self.newTerm(regex1))
- srq2 = SpanRegexQuery(self.newTerm(regex2))
+ srq1 = SpanMultiTermQueryWrapper(RegexQuery(self.newTerm(regex1)))
+ srq2 = SpanMultiTermQueryWrapper(RegexQuery(self.newTerm(regex2)))
query = SpanNearQuery([srq1, srq2], slop, ordered)
return self.searcher.search(query, 50).totalHits
Modified: lucene/pylucene/trunk/test/test_Similarity.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test/test_Similarity.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/test/test_Similarity.py (original)
+++ lucene/pylucene/trunk/test/test_Similarity.py Sat Jul 7 15:29:35 2012
@@ -18,7 +18,7 @@ from lucene import *
class SimpleSimilarity(PythonSimilarity):
- def lengthNorm(self, field, numTerms):
+ def computeNorm(self, field, state):
return 1.0
def queryNorm(self, sumOfSquaredWeights):
@@ -64,7 +64,7 @@ class SimilarityTestCase(TestCase):
writer.addDocument(d1)
writer.addDocument(d2)
- writer.optimize()
+ writer.commit()
writer.close()
searcher = IndexSearcher(store, True)
Modified: lucene/pylucene/trunk/test/test_Sort.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test/test_Sort.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/test/test_Sort.py (original)
+++ lucene/pylucene/trunk/test/test_Sort.py Sat Jul 7 15:29:35 2012
@@ -97,7 +97,7 @@ class SortTestCase(TestCase):
Field.Index.NOT_ANALYZED))
doc.setBoost(2.0) # produce some scores above 1.0
writer.addDocument(doc)
- # writer.optimize()
+ # writer.commit()
writer.close()
s = IndexSearcher(indexStore, True)
s.setDefaultFieldSortScoring(True, True)
@@ -131,7 +131,7 @@ class SortTestCase(TestCase):
writer.setMaxBufferedDocs(self.getRandomNumber(2, 12))
writer.addDocument(doc)
- # writer.optimize()
+ # writer.commit()
# print writer.getSegmentCount()
writer.close()
@@ -297,27 +297,27 @@ class SortTestCase(TestCase):
class intParser(PythonIntParser):
def parseInt(_self, val):
- return (val.bytes[0] - ord('A')) * 123456
+ return (ord(val[0]) - ord('A')) * 123456
class floatParser(PythonFloatParser):
def parseFloat(_self, val):
- return math.sqrt(val.bytes[0])
+ return math.sqrt(ord(val[0]))
class longParser(PythonLongParser):
def parseLong(_self, val):
- return (val.bytes[0] - ord('A')) * 1234567890L
+ return (ord(val[0]) - ord('A')) * 1234567890L
class doubleParser(PythonDoubleParser):
def parseDouble(_self, val):
- return math.pow(val.bytes[0], val.bytes[0] - ord('A'))
+ return math.pow(ord(val[0]), ord(val[0]) - ord('A'))
class byteParser(PythonByteParser):
def parseByte(_self, val):
- return chr(val.bytes[0] - ord('A'))
+ return chr(ord(val[0]) - ord('A'))
class shortParser(PythonShortParser):
def parseShort(_self, val):
- return val.bytes[0] - ord('A')
+ return ord(val[0]) - ord('A')
sort = Sort()
sort.setSort([SortField("parser", intParser()),
@@ -936,7 +936,7 @@ class SortTestCase(TestCase):
"""
# ScoreDoc[] result = searcher.search(query, None, 1000, sort).scoreDocs
- hits = searcher.search(query, None, len(expectedResult), sort)
+ hits = searcher.search(query, None, len(expectedResult) or 1, sort)
sds = hits.scoreDocs
self.assertEqual(hits.totalHits, len(expectedResult))
@@ -1004,7 +1004,7 @@ class MyFieldComparator(PythonFieldCompa
class intParser(PythonIntParser):
def parseInt(_self, val):
- return (val.bytes[0] - ord('A')) * 123456
+ return (ord(val[0]) - ord('A')) * 123456
self.docValues = FieldCache.DEFAULT.getInts(context.reader, "parser",
intParser())
Modified: lucene/pylucene/trunk/test/test_TermRangeFilter.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test/test_TermRangeFilter.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/test/test_TermRangeFilter.py (original)
+++ lucene/pylucene/trunk/test/test_TermRangeFilter.py Sat Jul 7 15:29:35 2012
@@ -399,7 +399,7 @@ class TestTermRangeFilter(BaseTestRangeF
Field.Store.YES, Field.Index.NOT_ANALYZED))
writer.addDocument(doc)
- writer.optimize()
+ writer.commit()
writer.close()
reader = IndexReader.open(farsiIndex, True)
@@ -441,7 +441,7 @@ class TestTermRangeFilter(BaseTestRangeF
Field.Index.NOT_ANALYZED))
writer.addDocument(doc)
- writer.optimize()
+ writer.commit()
writer.close()
reader = IndexReader.open(danishIndex, True)
Modified: lucene/pylucene/trunk/test/test_TermRangeQuery.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test/test_TermRangeQuery.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/test/test_TermRangeQuery.py (original)
+++ lucene/pylucene/trunk/test/test_TermRangeQuery.py Sat Jul 7 15:29:35 2012
@@ -62,21 +62,21 @@ class TermRangeQueryTestCase(TestCase):
topDocs = searcher.search(query, 50)
self.assertEqual(1, topDocs.totalHits,
"A,B,C,D, only B in range")
- searcher.close()
+ del searcher
self._initializeIndex(["A", "B", "D"])
searcher = IndexSearcher(self.dir, True)
topDocs = searcher.search(query, 50)
self.assertEqual(1, topDocs.totalHits,
"A,B,D, only B in range")
- searcher.close()
+ del searcher
self._addDoc("C")
searcher = IndexSearcher(self.dir, True)
topDocs = searcher.search(query, 50)
self.assertEqual(1, topDocs.totalHits,
"C added, still only B in range")
- searcher.close()
+ del searcher
def testInclusive(self):
@@ -87,21 +87,21 @@ class TermRangeQueryTestCase(TestCase):
topDocs = searcher.search(query, 50)
self.assertEqual(3, topDocs.totalHits,
"A,B,C,D - A,B,C in range")
- searcher.close()
+ del searcher
self._initializeIndex(["A", "B", "D"])
searcher = IndexSearcher(self.dir, True)
topDocs = searcher.search(query, 50)
self.assertEqual(2, topDocs.totalHits,
"A,B,D - A and B in range")
- searcher.close()
+ del searcher
self._addDoc("C")
searcher = IndexSearcher(self.dir, True)
topDocs = searcher.search(query, 50)
self.assertEqual(3, topDocs.totalHits,
"C added - A, B, C in range")
- searcher.close()
+ del searcher
if __name__ == "__main__":
Modified: lucene/pylucene/trunk/test/test_ThaiAnalyzer.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test/test_ThaiAnalyzer.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/test/test_ThaiAnalyzer.py (original)
+++ lucene/pylucene/trunk/test/test_ThaiAnalyzer.py Sat Jul 7 15:29:35 2012
@@ -23,7 +23,6 @@ class ThaiAnalyzerTestCase(BaseTokenStre
def testOffsets(self):
self.assert_(ThaiWordFilter.DBBI_AVAILABLE,
"JRE does not support Thai dictionary-based BreakIterator")
-
self._assertAnalyzesTo(ThaiAnalyzer(Version.LUCENE_CURRENT),
u"à¸à¸²à¸£à¸à¸µà¹à¹à¸à¹à¸à¹à¸à¸à¹à¸ªà¸à¸à¸§à¹à¸²à¸à¸²à¸à¸à¸µ",
[ u"à¸à¸²à¸£", u"à¸à¸µà¹", u"à¹à¸à¹", u"à¸à¹à¸à¸", u"à¹à¸ªà¸à¸",
@@ -51,6 +50,49 @@ class ThaiAnalyzerTestCase(BaseTokenStre
"JRE does not support Thai dictionary-based BreakIterator")
analyzer = ThaiAnalyzer(Version.LUCENE_CURRENT)
+ def testTokenType(self):
+ self.assert_(ThaiWordFilter.DBBI_AVAILABLE,
+ "JRE does not support Thai dictionary-based BreakIterator")
+
+ self._assertAnalyzesTo(ThaiAnalyzer(Version.LUCENE_35),
+ u"à¸à¸²à¸£à¸à¸µà¹à¹à¸à¹à¸à¹à¸à¸à¹à¸ªà¸à¸à¸§à¹à¸²à¸à¸²à¸à¸à¸µ à¹à¹à¹",
+ [ u"à¸à¸²à¸£", u"à¸à¸µà¹", u"à¹à¸à¹", u"à¸à¹à¸à¸", u"à¹à¸ªà¸à¸",
+ u"วà¹à¸²", u"à¸à¸²à¸", u"à¸à¸µ", u"à¹à¹à¹" ],
+ None, None,
+ [ "<SOUTHEAST_ASIAN>", "<SOUTHEAST_ASIAN>",
+ "<SOUTHEAST_ASIAN>", "<SOUTHEAST_ASIAN>",
+ "<SOUTHEAST_ASIAN>", "<SOUTHEAST_ASIAN>",
+ "<SOUTHEAST_ASIAN>", "<SOUTHEAST_ASIAN>",
+ "<NUM>" ])
+
+ def testPositionIncrements(self):
+ self.assert_(ThaiWordFilter.DBBI_AVAILABLE,
+ "JRE does not support Thai dictionary-based BreakIterator")
+
+ analyzer = ThaiAnalyzer(Version.LUCENE_35)
+
+ self._assertAnalyzesTo(analyzer, u"à¸à¸²à¸£à¸à¸µà¹à¹à¸à¹à¸à¹à¸à¸ the à¹à¸ªà¸à¸à¸§à¹à¸²à¸à¸²à¸à¸à¸µ",
+ [ u"à¸à¸²à¸£", u"à¸à¸µà¹", u"à¹à¸à¹", u"à¸à¹à¸à¸", u"à¹à¸ªà¸à¸",
+ u"วà¹à¸²", u"à¸à¸²à¸", u"à¸à¸µ" ],
+ [ 0, 3, 6, 9, 18, 22, 25, 28 ],
+ [ 3, 6, 9, 13, 22, 25, 28, 30 ],
+ None,
+ [ 1, 1, 1, 1, 2, 1, 1, 1 ])
+
+ # case that a stopword is adjacent to thai text, with no whitespace
+ self._assertAnalyzesTo(analyzer, u"à¸à¸²à¸£à¸à¸µà¹à¹à¸à¹à¸à¹à¸à¸the à¹à¸ªà¸à¸à¸§à¹à¸²à¸à¸²à¸à¸à¸µ",
+ [ u"à¸à¸²à¸£", u"à¸à¸µà¹", u"à¹à¸à¹", u"à¸à¹à¸à¸", u"à¹à¸ªà¸à¸",
+ u"วà¹à¸²", u"à¸à¸²à¸", u"à¸à¸µ" ],
+ [ 0, 3, 6, 9, 17, 21, 24, 27 ],
+ [ 3, 6, 9, 13, 21, 24, 27, 29 ],
+ None,
+ [ 1, 1, 1, 1, 2, 1, 1, 1 ])
+
+ def testAnalyzer30(self):
+
+ analyzer = ThaiAnalyzer(Version.LUCENE_30)
self._assertAnalyzesTo(analyzer, u"à¸à¸²à¸£à¸à¸µà¹à¹à¸à¹à¸à¹à¸à¸ the à¹à¸ªà¸à¸à¸§à¹à¸²à¸à¸²à¸à¸à¸µ",
[ u"à¸à¸²à¸£", u"à¸à¸µà¹", u"à¹à¸à¹", u"à¸à¹à¸à¸", u"à¹à¸ªà¸à¸",
@@ -94,12 +136,15 @@ class ThaiAnalyzerTestCase(BaseTokenStre
if __name__ == "__main__":
import sys, lucene
lucene.initVM()
- if '-loop' in sys.argv:
- sys.argv.remove('-loop')
- while True:
- try:
- main()
- except:
- pass
+ if ThaiWordFilter.DBBI_AVAILABLE:
+ if '-loop' in sys.argv:
+ sys.argv.remove('-loop')
+ while True:
+ try:
+ main()
+ except:
+ pass
+ else:
+ main()
else:
- main()
+ print >>sys.stderr, "Thai not supported by this JVM, tests skipped"
Modified: lucene/pylucene/trunk/test/test_bug1564.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/trunk/test/test_bug1564.py?rev=1358592&r1=1358591&r2=1358592&view=diff
==============================================================================
--- lucene/pylucene/trunk/test/test_bug1564.py (original)
+++ lucene/pylucene/trunk/test/test_bug1564.py Sat Jul 7 15:29:35 2012
@@ -32,7 +32,7 @@ class Test_Bug1564(unittest.TestCase):
Field.Store.NO, Field.Index.ANALYZED))
doc.add(Field('id', '1', Field.Store.YES, Field.Index.NO))
writer.addDocument(doc)
- writer.optimize()
+ writer.commit()
writer.close()
def tearDown(self):