You are viewing a plain text version of this content. The canonical link for it is here.
Posted to pylucene-commits@lucene.apache.org by va...@apache.org on 2010/07/12 21:33:47 UTC
svn commit: r963446 [1/2] - in /lucene/pylucene/branches/python_3: ./
samples/LuceneInAction/ samples/LuceneInAction/lia/advsearching/
samples/LuceneInAction/lia/analysis/
samples/LuceneInAction/lia/analysis/keyword/
samples/LuceneInAction/lia/analysis...
Author: vajda
Date: Mon Jul 12 19:33:45 2010
New Revision: 963446
URL: http://svn.apache.org/viewvc?rev=963446&view=rev
Log:
- ported PyLucene to Python 3.1.2, all tests pass
Modified:
lucene/pylucene/branches/python_3/Makefile
lucene/pylucene/branches/python_3/samples/LuceneInAction/index.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/advsearching/BooksLikeThis.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/advsearching/CategorizerTest.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/advsearching/MultiPhraseQueryTest.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/advsearching/SortingExample.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/advsearching/SpanQueryTest.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/analysis/AnalyzerDemo.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/analysis/AnalyzerUtils.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/analysis/keyword/KeywordAnalyzer.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/analysis/positional/PositionalPorterStopAnalyzerTest.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/analysis/positional/PositionalStopFilter.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/analysis/synonym/SynonymAnalyzerTest.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/common/LiaTestCase.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/common/TestDataDocumentHandler.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/extsearch/collector/BookLinkCollector.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/extsearch/filters/SpecialsFilter.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/extsearch/filters/SpecialsFilterTest.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/extsearch/queryparser/AdvancedQueryParserTest.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/extsearch/queryparser/CustomQueryParser.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/extsearch/sorting/DistanceSortingTest.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/handlingtypes/framework/FileIndexer.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/handlingtypes/msdoc/AntiWordHandler.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/handlingtypes/pdf/PDFHandler.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/handlingtypes/xml/Digester.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/handlingtypes/xml/DigesterXMLHandler.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/indexing/BaseIndexingTestCase.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/indexing/CompoundVersusMultiFileIndexTest.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/indexing/FSversusRAMDirectoryTest.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/indexing/FieldLengthTest.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/indexing/IndexTuningDemo.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/indexing/VerboseIndexing.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/meetlucene/Indexer.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/meetlucene/Searcher.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/searching/BooleanQueryTest.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/searching/Explainer.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/searching/QueryParserTest.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/searching/ScoreTest.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/tools/BerkeleyDbIndexer.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/tools/BerkeleyDbSearcher.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/tools/HighlightTest.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/tools/T9er.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/util/ClassLoader.py
lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/util/Streams.py
lucene/pylucene/branches/python_3/test/BaseTestRangeFilter.py
lucene/pylucene/branches/python_3/test/BaseTokenStreamTestCase.py
lucene/pylucene/branches/python_3/test/test_BinaryDocument.py
lucene/pylucene/branches/python_3/test/test_BooleanPrefixQuery.py
lucene/pylucene/branches/python_3/test/test_Highlighter.py
lucene/pylucene/branches/python_3/test/test_ICUFoldingFilter.py
lucene/pylucene/branches/python_3/test/test_ICUNormalizer2Filter.py
lucene/pylucene/branches/python_3/test/test_ICUTransformFilter.py
lucene/pylucene/branches/python_3/test/test_PositionIncrement.py
lucene/pylucene/branches/python_3/test/test_PyLucene.py
lucene/pylucene/branches/python_3/test/test_PyLuceneThread.py
lucene/pylucene/branches/python_3/test/test_PythonDirectory.py
lucene/pylucene/branches/python_3/test/test_Sort.py
lucene/pylucene/branches/python_3/test/test_StopWords.py
lucene/pylucene/branches/python_3/test/test_TermRangeFilter.py
lucene/pylucene/branches/python_3/test/test_ThaiAnalyzer.py
lucene/pylucene/branches/python_3/test/test_bug1564.py
Modified: lucene/pylucene/branches/python_3/Makefile
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/Makefile?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/Makefile (original)
+++ lucene/pylucene/branches/python_3/Makefile Mon Jul 12 19:33:45 2010
@@ -32,104 +32,22 @@ MODULES=lucene-modules-$(LUCENE_VER)
# PREFIX: where programs are normally installed on your system (Unix).
# PREFIX_PYTHON: where your version of python is installed.
# JCC: how jcc is invoked, depending on the python version:
-# - python 2.7:
+# - python 3.2:
# $(PYTHON) -m jcc
-# - python 2.6:
+# - python 3.1:
# $(PYTHON) -m jcc.__main__
-# - python 2.5:
-# $(PYTHON) -m jcc
-# - python 2.4:
-# $(PYTHON) $(PREFIX_PYTHON)/lib/python2.4/site-packages/jcc/__init__.py
# NUM_FILES is the number of wrapper files to generate. By default, jcc
# generates all C++ classes into one single file. This may exceed a compiler
# limit.
#
-# Mac OS X 10.6 (64-bit Python 2.6, Java 1.6)
-#PREFIX_PYTHON=/usr
-#ANT=ant
-#PYTHON=$(PREFIX_PYTHON)/bin/python
-#JCC=$(PYTHON) -m jcc.__main__ --shared --arch x86_64
-#NUM_FILES=3
-
# Mac OS X 10.6 (MacPorts 1.8.0 64-bit Python 2.7, Java 1.6)
-#PREFIX_PYTHON=/opt/local
-#ANT=ant
-#PYTHON=$(PREFIX_PYTHON)/bin/python
-#JCC=$(PYTHON) -m jcc --arch x86_64
-#NUM_FILES=3
-
-# Mac OS X 10.6 (64-bit and 32-bit Python 2.6 together, Java 1.6)
-#PREFIX_PYTHON=/usr
-#ANT=ant
-#PYTHON=$(PREFIX_PYTHON)/bin/python
-#JCC=$(PYTHON) -m jcc.__main__ --shared --arch x86_64 --arch i386
-#NUM_FILES=3
-
-# Mac OS X 10.5 (32-bit Python 2.5, Java 1.5)
-#PREFIX_PYTHON=/usr
-#ANT=ant
-#PYTHON=$(PREFIX_PYTHON)/bin/python
-#JCC=$(PYTHON) -m jcc --shared
-#NUM_FILES=3
-
-# Mac OS X (Python 2.3.5, Java 1.5, setuptools 0.6c7, Intel Mac OS X 10.4)
-#PREFIX_PYTHON=/usr
-#ANT=ant
-#PYTHON=$(PREFIX_PYTHON)/bin/python
-#JCC=$(PYTHON) /System/Library/Frameworks/Python.framework/Versions/2.3/lib/python2.3/site-packages/JCC-2.3-py2.3-macosx-10.4-i386.egg/jcc/__init__.py
-#NUM_FILES=3
-
-# Mac OS X (Python 2.3.5, Java 1.5, setuptools 0.6c7, PPC Mac OS X 10.4)
-#PREFIX_PYTHON=/usr
+#PREFIX_PYTHON=/Users/vajda/apache/python3/_install
#ANT=ant
#PYTHON=$(PREFIX_PYTHON)/bin/python
-#JCC=$(PYTHON) /System/Library/Frameworks/Python.framework/Versions/2.3/lib/python2.3/site-packages/JCC-2.3-py2.3-macosx-10.4-ppc.egg/jcc/__init__.py
+#JCC=$(PYTHON) -m jcc.__main__ --arch x86_64
#NUM_FILES=3
-# Linux (Ubuntu 6.06, Python 2.4, Java 1.5, no setuptools)
-#PREFIX_PYTHON=/usr
-#ANT=ant
-#PYTHON=$(PREFIX_PYTHON)/bin/python
-#JCC=$(PYTHON) $(PREFIX_PYTHON)/lib/python2.4/site-packages/jcc/__init__.py
-#NUM_FILES=3
-
-# Linux (Ubuntu 8.10 64-bit, Python 2.5.2, OpenJDK 1.6, setuptools 0.6c9)
-#PREFIX_PYTHON=/usr
-#ANT=ant
-#PYTHON=$(PREFIX_PYTHON)/bin/python
-#JCC=$(PYTHON) -m jcc --shared
-#NUM_FILES=3
-
-# FreeBSD
-#PREFIX_PYTHON=/usr
-#ANT=ant
-#PYTHON=$(PREFIX_PYTHON)/bin/python
-#JCC=$(PYTHON) -m jcc
-#NUM_FILES=3
-
-# Solaris (Solaris 11, Python 2.4 32-bit, Sun Studio 12, Java 1.6)
-#PREFIX_PYTHON=/usr
-#ANT=/usr/local/apache-ant-1.7.0/bin/ant
-#PYTHON=$(PREFIX_PYTHON)/bin/python
-#JCC=$(PYTHON) $(PREFIX_PYTHON)/lib/python2.4/site-packages/jcc/__init__.py
-#NUM_FILES=3
-
-# Windows (Win32, Python 2.5.1, Java 1.6, ant 1.7.0)
-#PREFIX_PYTHON=/cygdrive/o/Python-2.5.2/PCbuild
-#ANT=JAVA_HOME=o:\\Java\\jdk1.6.0_02 /cygdrive/o/java/apache-ant-1.7.0/bin/ant
-#PYTHON=$(PREFIX_PYTHON)/python.exe
-#JCC=$(PYTHON) -m jcc --shared
-#NUM_FILES=3
-
-# Windows (Win32, msys/MinGW, Python 2.6.4, Java 1.6, ant 1.7.1 (WinAnt))
-#PREFIX_PYTHON=/c/Python26
-#ANT=JAVA_HOME="c:\\Program Files\\Java\\jdk1.6.0_18" "/c/Program Files/WinAnt/bin/ant"
-#PYTHON=$(PREFIX_PYTHON)/python.exe
-#JCC=$(PYTHON) -m jcc.__main__ --shared --compiler mingw32
-#NUM_FILES=3
-
-
#
# No edits required below
#
@@ -203,7 +121,7 @@ ifneq ($(ICUPKG),)
ICURES= $(MODULES)/analysis/icu/src/resources
RESOURCES=--resources $(ICURES)
-ENDIANNESS:=$(shell $(PYTHON) -c "import struct; print struct.pack('h', 1) == '\000\001' and 'b' or 'l'")
+ENDIANNESS:=$(shell $(PYTHON) -c "import struct; print(struct.pack('h', 1) == '\000\001' and 'b' or 'l')")
resources: $(ICURES)/org/apache/lucene/analysis/icu/utr30.dat
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/index.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/index.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/index.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/index.py Mon Jul 12 19:33:45 2010
@@ -16,6 +16,6 @@ while True:
member = tar.next()
if member is None:
break
- print member.name
+ print(member.name)
tar.extract(member, baseDir)
tar.close()
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/advsearching/BooksLikeThis.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/advsearching/BooksLikeThis.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/advsearching/BooksLikeThis.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/advsearching/BooksLikeThis.py Mon Jul 12 19:33:45 2010
@@ -29,19 +29,19 @@ class BooksLikeThis(object):
reader = IndexReader.open(directory, True)
blt = BooksLikeThis(reader)
- for id in xrange(reader.maxDoc()):
+ for id in range(reader.maxDoc()):
if reader.isDeleted(id):
continue
doc = reader.document(id)
- print ''
- print doc.get("title").encode('utf-8')
+ print('')
+ print(doc.get("title").encode('utf-8'))
docs = blt.docsLike(id, doc, 10)
if not docs:
- print " None like this"
+ print(" None like this")
else:
for doc in docs:
- print " ->", doc.get("title").encode('utf-8')
+ print(" ->", doc.get("title").encode('utf-8'))
def __init__(self, reader):
@@ -72,7 +72,7 @@ class BooksLikeThis(object):
likeThisQuery.add(TermQuery(Term("isbn", doc.get("isbn"))),
BooleanClause.Occur.MUST_NOT)
- print " Query:", likeThisQuery.toString("contents")
+ print(" Query:", likeThisQuery.toString("contents"))
scoreDocs = self.searcher.search(likeThisQuery, 50).scoreDocs
docs = []
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/advsearching/CategorizerTest.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/advsearching/CategorizerTest.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/advsearching/CategorizerTest.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/advsearching/CategorizerTest.py Mon Jul 12 19:33:45 2010
@@ -37,16 +37,16 @@ class CategorizerTest(LiaTestCase):
def dumpCategoryVectors(self):
- for category, vectorMap in self.categoryMap.iteritems():
- print "Category", category
- for term, freq in vectorMap.iteritems():
- print " ", term, "=", freq
+ for category, vectorMap in self.categoryMap.items():
+ print("Category", category)
+ for term, freq in vectorMap.items():
+ print(" ", term, "=", freq)
def buildCategoryVectors(self):
reader = IndexReader.open(self.directory, True)
- for id in xrange(reader.maxDoc()):
+ for id in range(reader.maxDoc()):
doc = reader.document(id)
category = doc.get("category")
vectorMap = self.categoryMap.get(category, None)
@@ -77,7 +77,7 @@ class CategorizerTest(LiaTestCase):
bestAngle = 2 * pi
bestCategory = None
- for category, vectorMap in self.categoryMap.iteritems():
+ for category, vectorMap in self.categoryMap.items():
angle = self.computeAngle(words, category, vectorMap)
if angle != 'nan' and angle < bestAngle:
bestAngle = angle
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/advsearching/MultiPhraseQueryTest.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/advsearching/MultiPhraseQueryTest.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/advsearching/MultiPhraseQueryTest.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/advsearching/MultiPhraseQueryTest.py Mon Jul 12 19:33:45 2010
@@ -45,7 +45,7 @@ class MultiPhraseQueryTest(TestCase):
query.add([Term("field", "quick"),
Term("field", "fast")])
query.add(Term("field", "fox"))
- print query
+ print(query)
topDocs = self.searcher.search(query, 10)
self.assertEqual(1, topDocs.totalHits, "fast fox match")
@@ -74,4 +74,4 @@ class MultiPhraseQueryTest(TestCase):
def debug(self, hits):
for i, doc in hits:
- print "%s: %s" %(hits.score(i), doc['field'])
+ print("%s: %s" %(hits.score(i), doc['field']))
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/advsearching/SortingExample.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/advsearching/SortingExample.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/advsearching/SortingExample.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/advsearching/SortingExample.py Mon Jul 12 19:33:45 2010
@@ -44,9 +44,9 @@ class SortingExample(object):
searcher.search(query, None, collector)
scoreDocs = collector.topDocs().scoreDocs
- print "\nResults for:", query, "sorted by", sort
- print "Title".rjust(30), "pubmonth".rjust(10), \
- "id".center(4), "score".center(15)
+ print("\nResults for:", query, "sorted by", sort)
+ print("Title".rjust(30), "pubmonth".rjust(10), \
+ "id".center(4), "score".center(15))
scoreFormatter = DecimalFormat("0.######")
for scoreDoc in scoreDocs:
@@ -54,11 +54,11 @@ class SortingExample(object):
title = doc["title"]
if len(title) > 30:
title = title[:30]
- print title.encode('ascii', 'replace').rjust(30), \
+ print(title.encode('ascii', 'replace').rjust(30), \
doc["pubmonth"].rjust(10), \
str(scoreDoc.doc).center(4), \
- scoreFormatter.format(scoreDoc.score).ljust(12)
- print " ", doc["category"]
+ scoreFormatter.format(scoreDoc.score).ljust(12))
+ print(" ", doc["category"])
# print searcher.explain(query, scoreDoc.doc)
searcher.close()
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/advsearching/SpanQueryTest.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/advsearching/SpanQueryTest.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/advsearching/SpanQueryTest.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/advsearching/SpanQueryTest.py Mon Jul 12 19:33:45 2010
@@ -13,7 +13,7 @@
# ====================================================================
from unittest import TestCase
-from cStringIO import StringIO
+from io import StringIO
from lucene import \
WhitespaceAnalyzer, Document, Field, IndexReader, IndexWriter, Term, \
@@ -167,7 +167,7 @@ class SpanQueryTest(TestCase):
def dumpSpans(self, query):
spans = query.getSpans(self.reader)
- print "%s:" % query
+ print("%s:" % query)
numSpans = 0
scoreDocs = self.searcher.search(query, 50).scoreDocs
@@ -206,10 +206,10 @@ class SpanQueryTest(TestCase):
buffer.write(str(scores[id]))
buffer.write(") ")
- print buffer.getvalue()
+ print(buffer.getvalue())
# print self.searcher.explain(query, id)
if numSpans == 0:
- print " No spans"
+ print(" No spans")
- print ''
+ print('')
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/analysis/AnalyzerDemo.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/analysis/AnalyzerDemo.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/analysis/AnalyzerDemo.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/analysis/AnalyzerDemo.py Mon Jul 12 19:33:45 2010
@@ -42,14 +42,14 @@ class AnalyzerDemo(object):
def analyze(cls, text):
- print 'Analyzing "%s"' %(text)
+ print('Analyzing "%s"' %(text))
for analyzer in cls.analyzers:
name = type(analyzer).__name__
- print " %s:" %(name),
+ print(" %s:" %(name), end=' ')
AnalyzerUtils.displayTokens(analyzer, text)
- print
- print
+ print()
+ print()
main = classmethod(main)
analyze = classmethod(analyze)
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/analysis/AnalyzerUtils.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/analysis/AnalyzerUtils.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/analysis/AnalyzerUtils.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/analysis/AnalyzerUtils.py Mon Jul 12 19:33:45 2010
@@ -22,12 +22,12 @@ class AnalyzerUtils(object):
def main(cls, argv):
- print "SimpleAnalyzer"
+ print("SimpleAnalyzer")
cls.displayTokensWithFullDetails(SimpleAnalyzer(),
"The quick brown fox....")
- print "\n----"
- print "StandardAnalyzer"
+ print("\n----")
+ print("StandardAnalyzer")
cls.displayTokensWithFullDetails(StandardAnalyzer(Version.LUCENE_CURRENT), "I'll e-mail you at xyz@example.com")
def setPositionIncrement(cls, source, posIncr):
@@ -61,7 +61,7 @@ class AnalyzerUtils(object):
term = tokenStream.addAttribute(CharTermAttribute.class_)
while tokenStream.incrementToken():
- print "[%s]" %(term.toString()),
+ print("[%s]" %(term.toString()), end=' ')
def displayTokensWithPositions(cls, analyzer, text):
@@ -74,10 +74,10 @@ class AnalyzerUtils(object):
increment = posIncr.getPositionIncrement()
if increment > 0:
position = position + increment
- print "\n%d:" %(position),
+ print("\n%d:" %(position), end=' ')
- print "[%s]" %(term.toString()),
- print
+ print("[%s]" %(term.toString()), end=' ')
+ print()
def displayTokensWithFullDetails(cls, analyzer, text):
@@ -93,13 +93,13 @@ class AnalyzerUtils(object):
increment = posIncr.getPositionIncrement()
if increment > 0:
position = position + increment
- print "\n%d:" %(position),
+ print("\n%d:" %(position), end=' ')
- print "[%s:%d->%d:%s]" %(term.toString(),
+ print("[%s:%d->%d:%s]" %(term.toString(),
offset.startOffset(),
offset.endOffset(),
- type.type()),
- print
+ type.type()), end=' ')
+ print()
def assertAnalyzesTo(cls, analyzer, input, outputs):
@@ -107,12 +107,12 @@ class AnalyzerUtils(object):
termAttr = stream.addAttribute(CharTermAttribute.class_)
for output in outputs:
if not stream.incrementToken():
- raise AssertionError, 'stream.incremementToken()'
+ raise AssertionError('stream.incremementToken()')
if output != termAttr.toString():
- raise AssertionError, 'output == termAttr.toString())'
+ raise AssertionError('output == termAttr.toString())')
if stream.incrementToken():
- raise AssertionError, 'not stream.incremementToken()'
+ raise AssertionError('not stream.incremementToken()')
stream.close()
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/analysis/keyword/KeywordAnalyzer.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/analysis/keyword/KeywordAnalyzer.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/analysis/keyword/KeywordAnalyzer.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/analysis/keyword/KeywordAnalyzer.py Mon Jul 12 19:33:45 2010
@@ -28,7 +28,7 @@ class KeywordAnalyzer(PythonAnalyzer):
super(_tokenStream, self).__init__()
self.done = False
- def next(self):
+ def __next__(self):
if not self.done:
self.done = True
text = JArray('char')(1024)
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/analysis/positional/PositionalPorterStopAnalyzerTest.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/analysis/positional/PositionalPorterStopAnalyzerTest.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/analysis/positional/PositionalPorterStopAnalyzerTest.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/analysis/positional/PositionalPorterStopAnalyzerTest.py Mon Jul 12 19:33:45 2010
@@ -81,6 +81,6 @@ class PositionalPorterStopAnalyzerTest(T
text = "The quick brown fox jumps over the lazy dogs"
AnalyzerUtils.displayTokensWithPositions(cls.porterAnalyzer, text)
- print ''
+ print('')
main = classmethod(main)
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/analysis/positional/PositionalStopFilter.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/analysis/positional/PositionalStopFilter.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/analysis/positional/PositionalStopFilter.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/analysis/positional/PositionalStopFilter.py Mon Jul 12 19:33:45 2010
@@ -27,7 +27,7 @@ class PositionalStopFilter(PythonTokenFi
self.input = tokenStream
self.stopWords = stopWords
- def next(self):
+ def __next__(self):
increment = 0
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/analysis/synonym/SynonymAnalyzerTest.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/analysis/synonym/SynonymAnalyzerTest.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/analysis/synonym/SynonymAnalyzerTest.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/analysis/synonym/SynonymAnalyzerTest.py Mon Jul 12 19:33:45 2010
@@ -97,10 +97,10 @@ class SynonymAnalyzerTest(TestCase):
query = QueryParser(Version.LUCENE_CURRENT, "content",
cls.synonymAnalyzer).parse('"fox jumps"')
- print "\"fox jumps\" parses to ", query.toString("content")
+ print("\"fox jumps\" parses to ", query.toString("content"))
- print "From AnalyzerUtils.tokensFromAnalysis: "
+ print("From AnalyzerUtils.tokensFromAnalysis: ")
AnalyzerUtils.displayTokens(cls.synonymAnalyzer, "\"fox jumps\"")
- print ''
+ print('')
main = classmethod(main)
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/common/LiaTestCase.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/common/LiaTestCase.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/common/LiaTestCase.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/common/LiaTestCase.py Mon Jul 12 19:33:45 2010
@@ -41,11 +41,11 @@ class LiaTestCase(TestCase):
def dumpHits(self, searcher, scoreDocs):
if not scoreDocs:
- print "No hits"
+ print("No hits")
else:
for scoreDoc in scoreDocs:
- print "%s: %s" %(scoreDoc.score,
- searcher.doc(scoreDoc.doc).get('title'))
+ print("%s: %s" %(scoreDoc.score,
+ searcher.doc(scoreDoc.doc).get('title')))
def assertHitsIncludeTitle(self, searcher, scoreDocs, title,
fail=False):
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/common/TestDataDocumentHandler.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/common/TestDataDocumentHandler.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/common/TestDataDocumentHandler.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/common/TestDataDocumentHandler.py Mon Jul 12 19:33:45 2010
@@ -42,14 +42,14 @@ class TestDataDocumentHandler(object):
def indexFile(cls, writer, path, baseDir):
- input = file(path)
+ input = open(path, encoding='unicode-escape')
props = {}
while True:
line = input.readline().strip()
if not line:
break
name, value = line.split('=', 1)
- props[name] = value.decode('unicode-escape')
+ props[name] = value
input.close()
doc = Document()
@@ -66,11 +66,11 @@ class TestDataDocumentHandler(object):
subject = props['subject']
pubmonth = props['pubmonth']
- print title.encode('utf8')
- print author.encode('utf-8')
- print subject.encode('utf-8')
- print category.encode('utf-8')
- print "---------"
+ print((title.encode('utf8')))
+ print((author.encode('utf-8')))
+ print((subject.encode('utf-8')))
+ print((category.encode('utf-8')))
+ print("---------")
doc.add(Field("isbn", isbn,
Field.Store.YES, Field.Index.NOT_ANALYZED))
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/extsearch/collector/BookLinkCollector.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/extsearch/collector/BookLinkCollector.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/extsearch/collector/BookLinkCollector.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/extsearch/collector/BookLinkCollector.py Mon Jul 12 19:33:45 2010
@@ -44,7 +44,7 @@ class BookLinkCollector(PythonCollector)
title = self.titles.getTerm(docID, term).utf8ToString()
self.documents[url] = title
- print "%s: %s" %(title, score)
+ print("%s: %s" %(title, score))
def getLinks(self):
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/extsearch/filters/SpecialsFilter.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/extsearch/filters/SpecialsFilter.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/extsearch/filters/SpecialsFilter.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/extsearch/filters/SpecialsFilter.py Mon Jul 12 19:33:45 2010
@@ -28,7 +28,7 @@ class SpecialsFilter(PythonFilter):
def getDocIdSet(self, reader):
- bits = OpenBitSet(long(reader.maxDoc()))
+ bits = OpenBitSet(int(reader.maxDoc()))
isbns = self.accessor.isbns()
for isbn in isbns:
@@ -38,7 +38,7 @@ class SpecialsFilter(PythonFilter):
result = docsEnum.getBulkResult()
count = docsEnum.read()
if count == 1:
- bits.set(long(result.docs.ints[0]))
+ bits.set(int(result.docs.ints[0]))
return bits
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/extsearch/filters/SpecialsFilterTest.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/extsearch/filters/SpecialsFilterTest.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/extsearch/filters/SpecialsFilterTest.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/extsearch/filters/SpecialsFilterTest.py Mon Jul 12 19:33:45 2010
@@ -56,5 +56,5 @@ class SpecialsFilterTest(LiaTestCase):
logoOrEdBooks.add(edBooksOnSpecial, BooleanClause.Occur.SHOULD)
topDocs = self.searcher.search(logoOrEdBooks, 50)
- print logoOrEdBooks
+ print(logoOrEdBooks)
self.assertEqual(2, topDocs.totalHits, "Papert and Steiner")
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/extsearch/queryparser/AdvancedQueryParserTest.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/extsearch/queryparser/AdvancedQueryParserTest.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/extsearch/queryparser/AdvancedQueryParserTest.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/extsearch/queryparser/AdvancedQueryParserTest.py Mon Jul 12 19:33:45 2010
@@ -33,7 +33,7 @@ class AdvancedQueryParserTest(TestCase):
writer = IndexWriter(self.directory, self.analyzer, True,
IndexWriter.MaxFieldLength.LIMITED)
- for i in xrange(1, 501):
+ for i in range(1, 501):
doc = Document()
doc.add(Field("id", NumberUtils.pad(i),
Field.Store.YES, Field.Index.NOT_ANALYZED))
@@ -89,8 +89,8 @@ class AdvancedQueryParserTest(TestCase):
scoreDocs = searcher.search(query, 1000).scoreDocs
self.assertEqual(310, len(scoreDocs))
- print parser.parse("special:[term TO *]")
- print parser.parse("special:[* TO term]")
+ print(parser.parse("special:[term TO *]"))
+ print(parser.parse("special:[* TO term]"))
def testPhraseQuery(self):
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/extsearch/queryparser/CustomQueryParser.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/extsearch/queryparser/CustomQueryParser.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/extsearch/queryparser/CustomQueryParser.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/extsearch/queryparser/CustomQueryParser.py Mon Jul 12 19:33:45 2010
@@ -29,10 +29,10 @@ class CustomQueryParser(PythonQueryParse
super(CustomQueryParser, self).__init__(Version.LUCENE_CURRENT, field, analyzer)
def getFuzzyQuery(self, field, termText, minSimilarity):
- raise AssertionError, "Fuzzy queries not allowed"
+ raise AssertionError("Fuzzy queries not allowed")
def getWildcardQuery(self, field, termText):
- raise AssertionError, "Wildcard queries not allowed"
+ raise AssertionError("Wildcard queries not allowed")
#
# Special handling for the "id" field, pads each part
@@ -51,7 +51,7 @@ class CustomQueryParser(PythonQueryParse
inclusive, True)
if field == "special":
- print part1, "->", part2
+ print(part1, "->", part2)
return TermRangeQuery("field", part1, part2, inclusive, True)
@@ -88,10 +88,10 @@ class MultiFieldCustomQueryParser(Python
super(MultiFieldCustomQueryParser, self).__init__(Version.LUCENE_CURRENT, fields, analyzer)
def getFuzzyQuery(self, super, field, termText, minSimilarity):
- raise AssertionError, "Fuzzy queries not allowed"
+ raise AssertionError("Fuzzy queries not allowed")
def getWildcardQuery(self, super, field, termText):
- raise AssertionError, "Wildcard queries not allowed"
+ raise AssertionError("Wildcard queries not allowed")
#
# Special handling for the "id" field, pads each part
@@ -110,7 +110,7 @@ class MultiFieldCustomQueryParser(Python
inclusive, True)
if field == "special":
- print part1, "->", part2
+ print(part1, "->", part2)
return TermRangeQuery("field", part1, part2, inclusive, True)
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/extsearch/sorting/DistanceSortingTest.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/extsearch/sorting/DistanceSortingTest.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/extsearch/sorting/DistanceSortingTest.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/extsearch/sorting/DistanceSortingTest.py Mon Jul 12 19:33:45 2010
@@ -82,10 +82,10 @@ class DistanceSortingTest(TestCase):
def dumpDocs(self, sort, docs):
- print "Sorted by:", sort
+ print("Sorted by:", sort)
for scoreDoc in docs.scoreDocs:
fieldDoc = FieldDoc.cast_(scoreDoc)
distance = Double.cast_(fieldDoc.fields[0]).doubleValue()
doc = self.searcher.doc(fieldDoc.doc)
- print " %(name)s @ (%(location)s) ->" %doc, distance
+ print(" %(name)s @ (%(location)s) ->" %doc, distance)
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/handlingtypes/framework/FileIndexer.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/handlingtypes/framework/FileIndexer.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/handlingtypes/framework/FileIndexer.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/handlingtypes/framework/FileIndexer.py Mon Jul 12 19:33:45 2010
@@ -31,7 +31,7 @@ class FileIndexer(object):
def main(cls, argv):
if len(argv) != 3:
- print "Usage: python FileIndexer.py <index dir> <data dir>"
+ print("Usage: python FileIndexer.py <index dir> <data dir>")
return
indexDir = argv[1]
@@ -56,12 +56,12 @@ class FileIndexer(object):
numIndexed = cls.index(indexDir, dataDir)
duration = timedelta(seconds=time() - start)
- print "Indexing %s files took %s" %(numIndexed, duration)
+ print("Indexing %s files took %s" %(numIndexed, duration))
def index(cls, indexDir, dataDir):
if not (os.path.exists(dataDir) and os.path.isdir(dataDir)):
- raise IOError, "%s does not exist or is not a directory" %(dataDir)
+ raise IOError("%s does not exist or is not a directory" %(dataDir))
writer = IndexWriter(indexDir, StandardAnalyzer(), True,
IndexWriter.MaxFieldLength.UNLIMITED)
@@ -101,7 +101,7 @@ class FileIndexer(object):
if ext:
handlerClassName = cls.handlerProps.get(ext, None)
if handlerClassName is None:
- print "error indexing %s: no handler for %s files" %(path, ext)
+ print("error indexing %s: no handler for %s files" %(path, ext))
return None
try:
@@ -110,13 +110,13 @@ class FileIndexer(object):
doc = handler.indexFile(writer, path)
if doc is not None:
- print 'indexed', path
+ print('indexed', path)
return doc
except SyntaxError:
raise
- except Exception, e:
- print 'error indexing %s: %s' %(path, e)
+ except Exception as e:
+ print('error indexing %s: %s' %(path, e))
return None
main = classmethod(main)
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/handlingtypes/msdoc/AntiWordHandler.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/handlingtypes/msdoc/AntiWordHandler.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/handlingtypes/msdoc/AntiWordHandler.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/handlingtypes/msdoc/AntiWordHandler.py Mon Jul 12 19:33:45 2010
@@ -37,6 +37,6 @@ class AntiWordHandler(object):
exitCode = process.wait()
if exitCode != 0:
- raise RuntimeError, "pdftotext exit code %d" %(exitCode)
+ raise RuntimeError("pdftotext exit code %d" %(exitCode))
return doc
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/handlingtypes/pdf/PDFHandler.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/handlingtypes/pdf/PDFHandler.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/handlingtypes/pdf/PDFHandler.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/handlingtypes/pdf/PDFHandler.py Mon Jul 12 19:33:45 2010
@@ -39,7 +39,7 @@ class PDFHandler(object):
exitCode = process.wait()
if exitCode != 0:
- raise RuntimeError, "pdfinfo exit code %d" %(exitCode)
+ raise RuntimeError("pdfinfo exit code %d" %(exitCode))
try:
process = popen2.Popen4(["pdftotext", "-enc", "UTF-8", path, "-"])
@@ -54,6 +54,6 @@ class PDFHandler(object):
exitCode = process.wait()
if exitCode != 0:
- raise RuntimeError, "pdftotext exit code %d" %(exitCode)
+ raise RuntimeError("pdftotext exit code %d" %(exitCode))
return doc
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/handlingtypes/xml/Digester.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/handlingtypes/xml/Digester.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/handlingtypes/xml/Digester.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/handlingtypes/xml/Digester.py Mon Jul 12 19:33:45 2010
@@ -47,7 +47,7 @@ class Digester(xml.sax.ContentHandler):
self.path.append(tag)
pairs = self.attributes.get('/'.join(self.path))
if pairs is not None:
- for name, value in attrs.items():
+ for name, value in list(attrs.items()):
property = pairs.get(name)
if property is not None:
self.properties[property] = value
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/handlingtypes/xml/DigesterXMLHandler.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/handlingtypes/xml/DigesterXMLHandler.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/handlingtypes/xml/DigesterXMLHandler.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/handlingtypes/xml/DigesterXMLHandler.py Mon Jul 12 19:33:45 2010
@@ -37,7 +37,7 @@ class DigesterXMLHandler(object):
try:
file = open(path)
- except IOError, e:
+ except IOError as e:
raise
else:
props = self.digester.parse(file)
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/indexing/BaseIndexingTestCase.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/indexing/BaseIndexingTestCase.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/indexing/BaseIndexingTestCase.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/indexing/BaseIndexingTestCase.py Mon Jul 12 19:33:45 2010
@@ -51,7 +51,7 @@ class BaseIndexingTestCase(TestCase):
IndexWriter.MaxFieldLength.UNLIMITED)
writer.setUseCompoundFile(self.isCompound())
- for i in xrange(len(self.keywords)):
+ for i in range(len(self.keywords)):
doc = Document()
doc.add(Field("id", self.keywords[i],
Field.Store.YES, Field.Index.NOT_ANALYZED))
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/indexing/CompoundVersusMultiFileIndexTest.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/indexing/CompoundVersusMultiFileIndexTest.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/indexing/CompoundVersusMultiFileIndexTest.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/indexing/CompoundVersusMultiFileIndexTest.py Mon Jul 12 19:33:45 2010
@@ -56,8 +56,8 @@ class CompoundVersusMultiFileIndexTest(T
cTiming = self.timeIndexWriter(self.cDir, True)
mTiming = self.timeIndexWriter(self.mDir, False)
- print "Compound Time :", cTiming
- print "Multi-file Time:", mTiming
+ print("Compound Time :", cTiming)
+ print("Multi-file Time:", mTiming)
self.assert_(cTiming > mTiming)
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/indexing/FSversusRAMDirectoryTest.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/indexing/FSversusRAMDirectoryTest.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/indexing/FSversusRAMDirectoryTest.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/indexing/FSversusRAMDirectoryTest.py Mon Jul 12 19:33:45 2010
@@ -53,8 +53,8 @@ class FSversusRAMDirectoryTest(TestCase)
#self.assert_(fsTiming > ramTiming)
- print "RAMDirectory Time:", ramTiming
- print "FSDirectory Time :", fsTiming
+ print("RAMDirectory Time:", ramTiming)
+ print("FSDirectory Time :", fsTiming)
def timeIndexWriter(self, dir):
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/indexing/FieldLengthTest.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/indexing/FieldLengthTest.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/indexing/FieldLengthTest.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/indexing/FieldLengthTest.py Mon Jul 12 19:33:45 2010
@@ -14,7 +14,7 @@
import os
-from itertools import izip
+
from unittest import TestCase
from time import time
from datetime import timedelta
@@ -62,7 +62,7 @@ class FieldLengthTest(TestCase):
IndexWriter.MaxFieldLength(maxFieldLength))
for keyword, unindexed, unstored, text in \
- izip(self.keywords, self.unindexed, self.unstored, self.text):
+ zip(self.keywords, self.unindexed, self.unstored, self.text):
doc = Document()
doc.add(Field("id", keyword,
Field.Store.YES, Field.Index.NOT_ANALYZED))
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/indexing/IndexTuningDemo.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/indexing/IndexTuningDemo.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/indexing/IndexTuningDemo.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/indexing/IndexTuningDemo.py Mon Jul 12 19:33:45 2010
@@ -26,7 +26,7 @@ class IndexTuningDemo(object):
def main(cls, argv):
if len(argv) < 5:
- print "Usage: python IndexTuningDemo.py <numDocs> <mergeFactor> <maxMergeDocs> <maxBufferedDocs>"
+ print("Usage: python IndexTuningDemo.py <numDocs> <mergeFactor> <maxMergeDocs> <maxBufferedDocs>")
return
docsInIndex = int(argv[1])
@@ -44,18 +44,18 @@ class IndexTuningDemo(object):
writer.setMaxBufferedDocs(int(argv[4]))
# writer.infoStream = System.out
- print "Merge factor: ", writer.getMergeFactor()
- print "Max merge docs:", writer.getMaxMergeDocs()
- print "Max buffered docs:", writer.getMaxBufferedDocs()
+ print("Merge factor: ", writer.getMergeFactor())
+ print("Max merge docs:", writer.getMaxMergeDocs())
+ print("Max buffered docs:", writer.getMaxBufferedDocs())
start = time()
- for i in xrange(docsInIndex):
+ for i in range(docsInIndex):
doc = Document()
doc.add(Field("fieldname", "Bibamus",
Field.Store.YES, Field.Index.TOKENIZED))
writer.addDocument(doc)
writer.close()
- print "Time: ", timedelta(seconds=time() - start)
+ print("Time: ", timedelta(seconds=time() - start))
main = classmethod(main)
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/indexing/VerboseIndexing.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/indexing/VerboseIndexing.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/indexing/VerboseIndexing.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/indexing/VerboseIndexing.py Mon Jul 12 19:33:45 2010
@@ -34,7 +34,7 @@ class VerboseIndexing(object):
writer.setInfoStream(System.out)
- for i in xrange(100):
+ for i in range(100):
doc = Document()
doc.add(Field("keyword", "goober",
Field.Store.YES, Field.Index.UN_TOKENIZED))
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/meetlucene/Indexer.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/meetlucene/Indexer.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/meetlucene/Indexer.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/meetlucene/Indexer.py Mon Jul 12 19:33:45 2010
@@ -26,7 +26,7 @@ class Indexer(object):
def main(cls, argv):
if len(argv) != 3:
- print "Usage: python Indexer.py <index dir> <data dir>"
+ print("Usage: python Indexer.py <index dir> <data dir>")
else:
indexDir = argv[1]
@@ -36,12 +36,12 @@ class Indexer(object):
numIndexed = cls.index(indexDir, dataDir)
duration = timedelta(seconds=time() - start)
- print "Indexing %s files took %s" %(numIndexed, duration)
+ print("Indexing %s files took %s" %(numIndexed, duration))
def index(cls, indexDir, dataDir):
if not (os.path.exists(dataDir) and os.path.isdir(dataDir)):
- raise IOError, "%s does not exist or is not a directory" %(dataDir)
+ raise IOError("%s does not exist or is not a directory" %(dataDir))
writer = IndexWriter(indexDir, StandardAnalyzer(), True)
writer.setUseCompoundFile(False)
@@ -68,10 +68,10 @@ class Indexer(object):
try:
reader = InputStreamReader(FileInputStream(path), 'iso-8859-1')
- except IOError, e:
- print 'IOError while opening %s: %s' %(path, e)
+ except IOError as e:
+ print('IOError while opening %s: %s' %(path, e))
else:
- print 'Indexing', path
+ print('Indexing', path)
doc = Document()
doc.add(Field("contents", reader))
doc.add(Field("path", os.path.abspath(path),
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/meetlucene/Searcher.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/meetlucene/Searcher.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/meetlucene/Searcher.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/meetlucene/Searcher.py Mon Jul 12 19:33:45 2010
@@ -26,14 +26,14 @@ class Searcher(object):
def main(cls, argv):
if len(argv) != 3:
- print "Usage: python Searcher.py <index dir> <query>"
+ print("Usage: python Searcher.py <index dir> <query>")
else:
indexDir = argv[1]
q = argv[2]
if not (os.path.exists(indexDir) and os.path.isdir(indexDir)):
- raise IOError, "%s does not exist or is not a directory" %(indexDir)
+ raise IOError("%s does not exist or is not a directory" %(indexDir))
cls.search(indexDir, q)
@@ -47,11 +47,11 @@ class Searcher(object):
hits = searcher.search(query)
duration = timedelta(seconds=time() - start)
- print "Found %d document(s) (in %s) that matched query '%s':" %(hits.length(), duration, q)
+ print("Found %d document(s) (in %s) that matched query '%s':" %(hits.length(), duration, q))
for hit in hits:
doc = Hit.cast_(hit).getDocument()
- print doc["path"]
+ print(doc["path"])
main = classmethod(main)
search = classmethod(search)
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/searching/BooleanQueryTest.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/searching/BooleanQueryTest.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/searching/BooleanQueryTest.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/searching/BooleanQueryTest.py Mon Jul 12 19:33:45 2010
@@ -51,9 +51,9 @@ class BooleanQueryTest(LiaTestCase):
searcher = IndexSearcher(self.directory, True)
scoreDocs = searcher.search(enlightenmentBooks, 50).scoreDocs
- print "or =", enlightenmentBooks
+ print("or =", enlightenmentBooks)
self.assertHitsIncludeTitle(searcher, scoreDocs,
"Extreme Programming Explained")
self.assertHitsIncludeTitle(searcher, scoreDocs,
- u"Tao Te Ching \u9053\u5FB7\u7D93")
+ "Tao Te Ching \u9053\u5FB7\u7D93")
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/searching/Explainer.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/searching/Explainer.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/searching/Explainer.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/searching/Explainer.py Mon Jul 12 19:33:45 2010
@@ -24,7 +24,7 @@ class Explainer(object):
def main(cls, argv):
if len(argv) != 3:
- print "Usage: Explainer <index dir> <query>"
+ print("Usage: Explainer <index dir> <query>")
else:
indexDir = argv[1]
@@ -34,7 +34,7 @@ class Explainer(object):
query = QueryParser(Version.LUCENE_CURRENT, "contents",
SimpleAnalyzer()).parse(queryExpression)
- print "Query:", queryExpression
+ print("Query:", queryExpression)
searcher = IndexSearcher(directory)
scoreDocs = searcher.search(query, 50).scoreDocs
@@ -42,9 +42,9 @@ class Explainer(object):
for scoreDoc in scoreDocs:
doc = searcher.doc(scoreDoc.doc)
explanation = searcher.explain(query, scoreDoc.doc)
- print "----------"
- print doc["title"].encode('utf-8')
- print explanation
+ print("----------")
+ print(doc["title"].encode('utf-8'))
+ print(explanation)
main = classmethod(main)
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/searching/QueryParserTest.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/searching/QueryParserTest.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/searching/QueryParserTest.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/searching/QueryParserTest.py Mon Jul 12 19:33:45 2010
@@ -45,7 +45,7 @@ class QueryParserTest(LiaTestCase):
StandardAnalyzer(Version.LUCENE_CURRENT))
parser.setLowercaseExpandedTerms(False)
- print parser.parse("/Computers/technology*").toString("category")
+ print(parser.parse("/Computers/technology*").toString("category"))
def testGrouping(self):
@@ -82,7 +82,7 @@ class QueryParserTest(LiaTestCase):
parser = QueryParser(Version.LUCENE_CURRENT, "subject", self.analyzer)
parser.setLocale(Locale.US)
query = parser.parse(expression)
- print expression, "parsed to", query
+ print(expression, "parsed to", query)
topDocs = self.searcher.search(query, 50)
self.assert_(topDocs.totalHits > 0)
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/searching/ScoreTest.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/searching/ScoreTest.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/searching/ScoreTest.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/searching/ScoreTest.py Mon Jul 12 19:33:45 2010
@@ -63,7 +63,7 @@ class ScoreTest(LiaTestCase):
query = TermQuery(Term("contents", "x"))
explanation = searcher.explain(query, 0)
- print explanation
+ print(explanation)
scoreDocs = searcher.search(query, 50).scoreDocs
self.assertEqual(1, len(scoreDocs))
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/tools/BerkeleyDbIndexer.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/tools/BerkeleyDbIndexer.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/tools/BerkeleyDbIndexer.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/tools/BerkeleyDbIndexer.py Mon Jul 12 19:33:45 2010
@@ -30,7 +30,7 @@ class BerkeleyDbIndexer(object):
def main(cls, argv):
if len(argv) < 2:
- print "Usage: BerkeleyDbIndexer <index dir> -create"
+ print("Usage: BerkeleyDbIndexer <index dir> -create")
return
dbHome = argv[1]
@@ -48,7 +48,7 @@ class BerkeleyDbIndexer(object):
if os.name == 'nt':
env.set_cachesize(0, 0x4000000, 1)
elif os.name == 'posix':
- from commands import getstatusoutput
+ from subprocess import getstatusoutput
if getstatusoutput('uname') == (0, 'Linux'):
env.set_cachesize(0, 0x4000000, 1)
@@ -99,6 +99,6 @@ class BerkeleyDbIndexer(object):
blocks.close()
env.close()
- print "Indexing Complete"
+ print("Indexing Complete")
main = classmethod(main)
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/tools/BerkeleyDbSearcher.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/tools/BerkeleyDbSearcher.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/tools/BerkeleyDbSearcher.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/tools/BerkeleyDbSearcher.py Mon Jul 12 19:33:45 2010
@@ -29,7 +29,7 @@ class BerkeleyDbSearcher(object):
def main(cls, argv):
if len(argv) != 2:
- print "Usage: BerkeleyDbSearcher <index dir>"
+ print("Usage: BerkeleyDbSearcher <index dir>")
return
dbHome = argv[1]
@@ -39,7 +39,7 @@ class BerkeleyDbSearcher(object):
if os.name == 'nt':
env.set_cachesize(0, 0x4000000, 1)
elif os.name == 'posix':
- from commands import getstatusoutput
+ from subprocess import getstatusoutput
if getstatusoutput('uname') == (0, 'Linux'):
env.set_cachesize(0, 0x4000000, 1)
@@ -71,7 +71,7 @@ class BerkeleyDbSearcher(object):
searcher = IndexSearcher(directory, True)
topDocs = searcher.search(TermQuery(Term("contents", "fox")), 50)
- print topDocs.totalHits, "document(s) found"
+ print(topDocs.totalHits, "document(s) found")
searcher.close()
except:
if txn is not None:
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/tools/HighlightTest.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/tools/HighlightTest.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/tools/HighlightTest.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/tools/HighlightTest.py Mon Jul 12 19:33:45 2010
@@ -49,4 +49,4 @@ class HighlightTest(LiaTestCase):
stream = SimpleAnalyzer(Version.LUCENE_CURRENT).tokenStream("title", StringReader(title))
fragment = highlighter.getBestFragment(stream, title)
- print fragment
+ print(fragment)
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/tools/T9er.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/tools/T9er.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/tools/T9er.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/tools/T9er.py Mon Jul 12 19:33:45 2010
@@ -27,7 +27,7 @@ class T9er(object):
def main(cls, argv):
if len(argv) != 3:
- print "Usage: T9er <WordNet index dir> <t9 index>"
+ print("Usage: T9er <WordNet index dir> <t9 index>")
return
for key in cls.keys:
@@ -35,7 +35,7 @@ class T9er(object):
k = key[1:]
for kc in k:
cls.keyMap[kc] = c
- print kc, "=", c
+ print(kc, "=", c)
indexDir = argv[1]
t9dir = argv[2]
@@ -43,11 +43,11 @@ class T9er(object):
reader = IndexReader.open(indexDir)
numDocs = reader.maxDoc()
- print "Processing", numDocs, "words"
+ print("Processing", numDocs, "words")
writer = IndexWriter(t9dir, WhitespaceAnalyzer(), True)
- for id in xrange(reader.maxDoc()):
+ for id in range(reader.maxDoc()):
origDoc = reader.document(id)
word = origDoc.get("word")
if word is None or len(word) == 0:
@@ -62,7 +62,7 @@ class T9er(object):
Field.Store.NO, Field.Index.UN_TOKENIZED))
writer.addDocument(newDoc)
if id % 100 == 0:
- print "Document", id
+ print("Document", id)
writer.optimize()
writer.close()
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/util/ClassLoader.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/util/ClassLoader.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/util/ClassLoader.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/util/ClassLoader.py Mon Jul 12 19:33:45 2010
@@ -31,11 +31,11 @@ class ClassLoader(object):
raise
except:
x, value, traceback = sys.exc_info()
- raise ImportError, value, traceback
+ raise ImportError(value).with_traceback(traceback)
try:
return getattr(m, name)
except AttributeError:
- raise ImportError, "Module %s has no class %s" %(module, name)
+ raise ImportError("Module %s has no class %s" %(module, name))
loadClass = classmethod(loadClass)
Modified: lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/util/Streams.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/util/Streams.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/util/Streams.py (original)
+++ lucene/pylucene/branches/python_3/samples/LuceneInAction/lia/util/Streams.py Mon Jul 12 19:33:45 2010
@@ -12,8 +12,8 @@
# limitations under the License.
# ====================================================================
-from StringIO import StringIO
-from HTMLParser import HTMLParser
+from io import StringIO
+from html.parser import HTMLParser
class InputStreamReader(object):
@@ -31,7 +31,7 @@ class InputStreamReader(object):
def read(self, length=-1):
text = self._read(length)
- text = unicode(text, self.encoding)
+ text = str(text, self.encoding)
return text
Modified: lucene/pylucene/branches/python_3/test/BaseTestRangeFilter.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/test/BaseTestRangeFilter.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/test/BaseTestRangeFilter.py (original)
+++ lucene/pylucene/branches/python_3/test/BaseTestRangeFilter.py Mon Jul 12 19:33:45 2010
@@ -67,7 +67,7 @@ class BaseTestRangeFilter(TestCase):
IndexWriter.MaxFieldLength.LIMITED)
seed(101)
- for d in xrange(self.minId, self.maxId + 1):
+ for d in range(self.minId, self.maxId + 1):
doc = Document()
doc.add(Field("id", self.pad(d), Field.Store.YES,
Field.Index.NOT_ANALYZED));
@@ -95,7 +95,7 @@ class BaseTestRangeFilter(TestCase):
tests = [-9999999, -99560, -100, -3, -1, 0, 3, 9, 10, 1000, 999999999]
- for i in xrange(0, len(tests) - 1):
+ for i in range(0, len(tests) - 1):
a = tests[i]
b = tests[i + 1]
aa = self.pad(a)
Modified: lucene/pylucene/branches/python_3/test/BaseTokenStreamTestCase.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/test/BaseTokenStreamTestCase.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/test/BaseTokenStreamTestCase.py (original)
+++ lucene/pylucene/branches/python_3/test/BaseTokenStreamTestCase.py Mon Jul 12 19:33:45 2010
@@ -49,7 +49,7 @@ class BaseTokenStreamTestCase(TestCase):
posIncrAtt = ts.getAttribute(PositionIncrementAttribute.class_)
ts.reset()
- for i in xrange(len(output)):
+ for i in range(len(output)):
# extra safety to enforce, that the state is not preserved and
# also assign bogus values
ts.clearAttributes()
Modified: lucene/pylucene/branches/python_3/test/test_BinaryDocument.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/test/test_BinaryDocument.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/test/test_BinaryDocument.py (original)
+++ lucene/pylucene/branches/python_3/test/test_BinaryDocument.py Mon Jul 12 19:33:45 2010
@@ -23,8 +23,8 @@ class TestBinaryDocument(TestCase):
def testBinaryFieldInIndex(self):
- bytes = JArray('byte')(self.binaryValStored)
- binaryFldStored = Field("binaryStored", bytes,
+ data = JArray('byte')(bytes(self.binaryValStored, 'utf-8'))
+ binaryFldStored = Field("binaryStored", data,
Field.Store.YES)
stringFldStored = Field("stringStored", self.binaryValStored,
Field.Store.YES, Field.Index.NO,
@@ -32,9 +32,9 @@ class TestBinaryDocument(TestCase):
try:
# binary fields with store off are not allowed
- Field("fail", bytes, Field.Store.NO)
+ Field("fail", data, Field.Store.NO)
self.fail()
- except JavaError, e:
+ except JavaError as e:
self.assertEqual(e.getJavaException().getClass().getName(),
'java.lang.IllegalArgumentException')
@@ -59,8 +59,8 @@ class TestBinaryDocument(TestCase):
# fetch the binary stored field and compare it's content with the
# original one
- bytes = docFromReader.getBinaryValue("binaryStored")
- binaryFldStoredTest = bytes.string_
+ data = docFromReader.getBinaryValue("binaryStored")
+ binaryFldStoredTest = data.string_
self.assertEqual(binaryFldStoredTest, self.binaryValStored)
# fetch the string field and compare it's content with the original
@@ -77,8 +77,8 @@ class TestBinaryDocument(TestCase):
def testCompressionTools(self):
- bytes = JArray('byte')(self.binaryValCompressed)
- binaryFldCompressed = Field("binaryCompressed", CompressionTools.compress(bytes), Field.Store.YES)
+ data = JArray('byte')(bytes(self.binaryValCompressed, 'utf-8'))
+ binaryFldCompressed = Field("binaryCompressed", CompressionTools.compress(data), Field.Store.YES)
stringFldCompressed = Field("stringCompressed", CompressionTools.compressString(self.binaryValCompressed), Field.Store.YES)
doc = Document()
@@ -99,8 +99,8 @@ class TestBinaryDocument(TestCase):
# fetch the binary compressed field and compare it's content with
# the original one
- bytes = CompressionTools.decompress(docFromReader.getBinaryValue("binaryCompressed"))
- binaryFldCompressedTest = bytes.string_
+ data = CompressionTools.decompress(docFromReader.getBinaryValue("binaryCompressed"))
+ binaryFldCompressedTest = data.string_
self.assertEqual(binaryFldCompressedTest, self.binaryValCompressed)
self.assertEqual(CompressionTools.decompressString(docFromReader.getBinaryValue("stringCompressed")), self.binaryValCompressed)
Modified: lucene/pylucene/branches/python_3/test/test_BooleanPrefixQuery.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/test/test_BooleanPrefixQuery.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/test/test_BooleanPrefixQuery.py (original)
+++ lucene/pylucene/branches/python_3/test/test_BooleanPrefixQuery.py Mon Jul 12 19:33:45 2010
@@ -60,7 +60,7 @@ class BooleanPrefixQueryTestCase(TestCas
bq.add(query, BooleanClause.Occur.MUST)
rw2 = bq.rewrite(reader)
- except Exception, e:
+ except Exception as e:
self.fail(e)
self.assertEqual(self.getCount(reader, rw1), self.getCount(reader, rw2),
Modified: lucene/pylucene/branches/python_3/test/test_Highlighter.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/test/test_Highlighter.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/test/test_Highlighter.py (original)
+++ lucene/pylucene/branches/python_3/test/test_Highlighter.py Mon Jul 12 19:33:45 2010
@@ -65,7 +65,7 @@ class HighlighterTestCase(TestCase):
result = highlighter.getBestFragments(tokenStream, text,
maxNumFragmentsRequired,
"...")
- print "\t", result
+ print("\t", result)
# Not sure we can assert anything here - just running to check we don't
# throw any exceptions
@@ -86,7 +86,7 @@ class HighlighterTestCase(TestCase):
# fuzzy etc) you must use a rewritten query!
self.query = self.query.rewrite(self.reader)
- print "Searching for:", self.query.toString(self.FIELD_NAME)
+ print("Searching for:", self.query.toString(self.FIELD_NAME))
self.scoreDocs = self.searcher.search(self.query, 100).scoreDocs
self.numHighlights = 0
@@ -107,7 +107,7 @@ class HighlighterTestCase(TestCase):
text,
maxNumFragmentsRequired,
fragmentSeparator)
- print "\t", result
+ print("\t", result)
def countHighlightTerm(self):
Modified: lucene/pylucene/branches/python_3/test/test_ICUFoldingFilter.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/test/test_ICUFoldingFilter.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/test/test_ICUFoldingFilter.py (original)
+++ lucene/pylucene/branches/python_3/test/test_ICUFoldingFilter.py Mon Jul 12 19:33:45 2010
@@ -18,7 +18,7 @@
try:
from icu import Normalizer2, UNormalizationMode2
-except ImportError, e:
+except ImportError as e:
pass
from unittest import main
@@ -44,32 +44,32 @@ class TestICUFoldingFilter(BaseTokenStre
[ "this", "is", "a", "test" ])
# case folding
- self._assertAnalyzesTo(a, u"Ruß", [ "russ" ])
+ self._assertAnalyzesTo(a, "Ruß", [ "russ" ])
# case folding with accent removal
- self._assertAnalyzesTo(a, u"ΜΆΪΟΣ", [ u"μαιοσ" ])
- self._assertAnalyzesTo(a, u"Μάϊος", [ u"μαιοσ" ])
+ self._assertAnalyzesTo(a, "ΜΆΪΟΣ", [ "μαιοσ" ])
+ self._assertAnalyzesTo(a, "Μάϊος", [ "μαιοσ" ])
# supplementary case folding
- self._assertAnalyzesTo(a, u"𐐖", [ u"𐐾" ])
+ self._assertAnalyzesTo(a, "𐐖", [ "𐐾" ])
# normalization
- self._assertAnalyzesTo(a, u"ﴳﴺﰧ", [ u"طمطمطم" ])
+ self._assertAnalyzesTo(a, "ﴳﴺﰧ", [ "طمطمطم" ])
# removal of default ignorables
- self._assertAnalyzesTo(a, u"क्‍ष", [ u"कष" ])
+ self._assertAnalyzesTo(a, "क्‍ष", [ "कष" ])
# removal of latin accents (composed)
- self._assertAnalyzesTo(a, u"résumé", [ "resume" ])
+ self._assertAnalyzesTo(a, "résumé", [ "resume" ])
# removal of latin accents (decomposed)
- self._assertAnalyzesTo(a, u"re\u0301sume\u0301", [ u"resume" ])
+ self._assertAnalyzesTo(a, "re\u0301sume\u0301", [ "resume" ])
# fold native digits
- self._assertAnalyzesTo(a, u"৭০৬", [ "706" ])
+ self._assertAnalyzesTo(a, "৭০৬", [ "706" ])
# ascii-folding-filter type stuff
- self._assertAnalyzesTo(a, u"đis is cræzy", [ "dis", "is", "craezy" ])
+ self._assertAnalyzesTo(a, "đis is cræzy", [ "dis", "is", "craezy" ])
if __name__ == "__main__":
Modified: lucene/pylucene/branches/python_3/test/test_ICUNormalizer2Filter.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/test/test_ICUNormalizer2Filter.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/test/test_ICUNormalizer2Filter.py (original)
+++ lucene/pylucene/branches/python_3/test/test_ICUNormalizer2Filter.py Mon Jul 12 19:33:45 2010
@@ -18,7 +18,7 @@
try:
from icu import Normalizer2, UNormalizationMode2
-except ImportError, e:
+except ImportError as e:
pass
from unittest import main
@@ -47,17 +47,17 @@ class TestICUNormalizer2Filter(BaseToken
self._assertAnalyzesTo(a, "Ruß", [ "russ" ])
# case folding
- self._assertAnalyzesTo(a, u"ΜΆΪΟΣ", [ u"μάϊοσ" ])
- self._assertAnalyzesTo(a, u"Μάϊος", [ u"μάϊοσ" ])
+ self._assertAnalyzesTo(a, "ΜΆΪΟΣ", [ "μάϊοσ" ])
+ self._assertAnalyzesTo(a, "Μάϊος", [ "μάϊοσ" ])
# supplementary case folding
- self._assertAnalyzesTo(a, u"𐐖", [ u"𐐾" ])
+ self._assertAnalyzesTo(a, "𐐖", [ "𐐾" ])
# normalization
- self._assertAnalyzesTo(a, u"ﴳﴺﰧ", [ u"طمطمطم" ])
+ self._assertAnalyzesTo(a, "ﴳﴺﰧ", [ "طمطمطم" ])
# removal of default ignorables
- self._assertAnalyzesTo(a, u"क्‍ष", [ u"क्ष" ])
+ self._assertAnalyzesTo(a, "क्‍ष", [ "क्ष" ])
def testAlternate(self):
@@ -71,7 +71,7 @@ class TestICUNormalizer2Filter(BaseToken
a = analyzer()
# decompose EAcute into E + combining Acute
- self._assertAnalyzesTo(a, u"\u00E9", [ u"\u0065\u0301" ])
+ self._assertAnalyzesTo(a, "\u00E9", [ "\u0065\u0301" ])
if __name__ == "__main__":
Modified: lucene/pylucene/branches/python_3/test/test_ICUTransformFilter.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/test/test_ICUTransformFilter.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/test/test_ICUTransformFilter.py (original)
+++ lucene/pylucene/branches/python_3/test/test_ICUTransformFilter.py Mon Jul 12 19:33:45 2010
@@ -18,7 +18,7 @@
try:
from icu import Transliterator, UTransDirection
-except ImportError, e:
+except ImportError as e:
pass
from unittest import main
@@ -43,17 +43,17 @@ class TestICUTransformFilter(BaseTokenSt
def testBasicFunctionality(self):
self._checkToken(self._getTransliterator("Traditional-Simplified"),
- u"簡化字", u"简化字")
+ "簡化字", "简化字")
self._checkToken(self._getTransliterator("Katakana-Hiragana"),
- u"ヒラガナ", u"ひらがな")
+ "ヒラガナ", "ひらがな")
self._checkToken(self._getTransliterator("Fullwidth-Halfwidth"),
- u"アルアノリウ", u"ｱﾙｱﾉﾘｳ")
+ "アルアノリウ", "ｱﾙｱﾉﾘｳ")
self._checkToken(self._getTransliterator("Any-Latin"),
- u"Αλφαβητικός Κατάλογος", u"Alphabētikós Katálogos")
+ "Αλφαβητικός Κατάλογος", "Alphabētikós Katálogos")
self._checkToken(self._getTransliterator("NFD; [:Nonspacing Mark:] Remove"),
- u"Alphabētikós Katálogos", u"Alphabetikos Katalogos")
+ "Alphabētikós Katálogos", "Alphabetikos Katalogos")
self._checkToken(self._getTransliterator("Han-Latin"),
- u"中国", u"zhōng guó")
+ "中国", "zhōng guó")
def testCustomFunctionality(self):
Modified: lucene/pylucene/branches/python_3/test/test_PositionIncrement.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/test/test_PositionIncrement.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/test/test_PositionIncrement.py (original)
+++ lucene/pylucene/branches/python_3/test/test_PositionIncrement.py Mon Jul 12 19:33:45 2010
@@ -245,7 +245,7 @@ class PositionIncrementTestCase(TestCase
it = payloads.iterator()
while it.hasNext():
count += 1
- it.next()
+ next(it)
self.assertEqual(5, count)
self.assert_(sawZero)
@@ -266,8 +266,8 @@ class PositionIncrementTestCase(TestCase
count = pls.size()
it = pls.iterator()
while it.hasNext():
- bytes = JArray('byte').cast_(it.next())
- s = bytes.string_
+ data = JArray('byte').cast_(next(it))
+ s = data.string_
sawZero |= s == "pos: 0"
self.assertEqual(5, count)
@@ -318,8 +318,8 @@ class PayloadFilter(PythonTokenFilter):
def incrementToken(self):
if self.input.incrementToken():
- bytes = JArray('byte')("pos: %d" %(self.pos))
- self.payloadAttr.setPayload(Payload(bytes))
+ data = JArray('byte')(bytes("pos: %d" %(self.pos), 'utf-8'))
+ self.payloadAttr.setPayload(Payload(data))
if self.i % 2 == 1:
posIncr = 1
Modified: lucene/pylucene/branches/python_3/test/test_PyLucene.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/test/test_PyLucene.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/test/test_PyLucene.py (original)
+++ lucene/pylucene/branches/python_3/test/test_PyLucene.py Mon Jul 12 19:33:45 2010
@@ -113,7 +113,7 @@ class Test_PyLuceneBase(object):
# using a unicode body cause problems, which seems very odd
# since the python type is the same regardless affter doing
# the encode
- body_text = u"hello world"*20
+ body_text = "hello world"*20
body_reader = StringReader(body_text)
doc.add(Field("content", body_reader))
Modified: lucene/pylucene/branches/python_3/test/test_PyLuceneThread.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/test/test_PyLuceneThread.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/test/test_PyLuceneThread.py (original)
+++ lucene/pylucene/branches/python_3/test/test_PyLuceneThread.py Mon Jul 12 19:33:45 2010
@@ -71,7 +71,7 @@ class PyLuceneThreadTestCase(TestCase):
""" Run 5 threads with 2000 queries each """
threads = []
- for i in xrange(5):
+ for i in range(5):
threads.append(threading.Thread(target=self.runSearch,
args=(2000,)))
Modified: lucene/pylucene/branches/python_3/test/test_PythonDirectory.py
URL: http://svn.apache.org/viewvc/lucene/pylucene/branches/python_3/test/test_PythonDirectory.py?rev=963446&r1=963445&r2=963446&view=diff
==============================================================================
--- lucene/pylucene/branches/python_3/test/test_PythonDirectory.py (original)
+++ lucene/pylucene/branches/python_3/test/test_PythonDirectory.py Mon Jul 12 19:33:45 2010
@@ -36,7 +36,7 @@ class DebugWrapper(object):
self.obj = obj
def __getattr__(self, name):
- print self.obj.__class__.__name__, self.obj.name, name
+ print(self.obj.__class__.__name__, self.obj.name, name)
sys.stdout.flush()
return getattr(self.obj, name)
@@ -106,7 +106,7 @@ class PythonFileStreamInput(PythonIndexI
self.isClone = clone
def length(self):
- return long(self._length)
+ return int(self._length)
def clone(self):
clone = PythonFileStreamInput(self.name, self.fh, self._length, True)
@@ -142,14 +142,13 @@ class PythonFileStreamOutput(PythonIndex
self.fh.close()
def length(self):
- return long(self._length)
+ return int(self._length)
def seekInternal(self, pos):
self.fh.seek(pos)
def flushBuffer(self, bytes):
-
- self.fh.write(bytes.string_)
+ self.fh.write(bytes.bytes_)
self.fh.flush()
self._length += len(bytes)
@@ -185,7 +184,7 @@ class PythonFileDirectory(PythonDirector
def fileLength(self, name):
file_path = os.path.join(self.path, name)
- return long(os.path.getsize(file_path))
+ return int(os.path.getsize(file_path))
def fileModified(self, name):
file_path = os.path.join(self.path, name)
@@ -202,7 +201,7 @@ class PythonFileDirectory(PythonDirector
try:
fh = open(file_path, "rb")
except IOError:
- raise JavaError, IOException(name)
+ raise JavaError(IOException(name))
stream = PythonFileStreamInput(name, fh, os.path.getsize(file_path))
self._streams.append(stream)
return stream
@@ -242,9 +241,9 @@ class PythonDirectoryTests(unittest.Test
store.close()
def test_IncrementalLoop(self):
- print "Testing Indexing Incremental Looping"
+ print("Testing Indexing Incremental Looping")
for i in range(100):
- print "indexing ", i
+ print("indexing ", i)
sys.stdout.flush()
self.test_indexDocument()
@@ -259,9 +258,9 @@ if __name__ == "__main__":
unittest.main()
except:
pass
- print 'inputs', env._dumpRefs(True).get('class org.osafoundation.lucene.store.PythonIndexOutput', 0)
- print 'outputs', env._dumpRefs(True).get('class org.osafoundation.lucene.store.PythonIndexInput', 0)
- print 'locks', env._dumpRefs(True).get('class org.osafoundation.lucene.store.PythonLock', 0)
- print 'dirs', env._dumpRefs(True).get('class org.osafoundation.lucene.store.PythonLock', 0)
+ print('inputs', env._dumpRefs(True).get('class org.osafoundation.lucene.store.PythonIndexOutput', 0))
+ print('outputs', env._dumpRefs(True).get('class org.osafoundation.lucene.store.PythonIndexInput', 0))
+ print('locks', env._dumpRefs(True).get('class org.osafoundation.lucene.store.PythonLock', 0))
+ print('dirs', env._dumpRefs(True).get('class org.osafoundation.lucene.store.PythonLock', 0))
else:
unittest.main()