You are viewing a plain text version of this content. The canonical link for it is here.
Posted to java-commits@lucene.apache.org by bu...@apache.org on 2008/11/19 00:41:51 UTC
svn commit: r718798 [3/4] - in /lucene/java/trunk: ./
src/java/org/apache/lucene/analysis/
src/java/org/apache/lucene/analysis/standard/
src/java/org/apache/lucene/analysis/tokenattributes/
src/java/org/apache/lucene/index/ src/java/org/apache/lucene/q...
Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TeeSinkTokenTest.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TeeSinkTokenTest.java?rev=718798&r1=718797&r2=718798&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TeeSinkTokenTest.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TeeSinkTokenTest.java Tue Nov 18 15:41:49 2008
@@ -18,6 +18,9 @@
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.English;
import org.apache.lucene.util.LuceneTestCase;
@@ -40,7 +43,8 @@
super(s);
}
- protected void setUp() {
+ protected void setUp() throws Exception {
+ super.setUp();
tokens1 = new String[]{"The", "quick", "Burgundy", "Fox", "jumped", "over", "the", "lazy", "Red", "Dogs"};
tokens2 = new String[]{"The", "Lazy", "Dogs", "should", "stay", "on", "the", "porch"};
buffer1 = new StringBuffer();
@@ -62,24 +66,29 @@
public void test() throws IOException {
SinkTokenizer sink1 = new SinkTokenizer(null) {
- public void add(Token t) {
- if (t != null && t.term().equalsIgnoreCase("The")) {
- super.add(t);
+ public void add(AttributeSource a) throws IOException {
+ TermAttribute termAtt = null;
+ if (a.hasAttribute(TermAttribute.class)) {
+ termAtt = (TermAttribute) a.getAttribute(TermAttribute.class);
+ }
+ if (termAtt != null && termAtt.term().equalsIgnoreCase("The")) {
+ super.add(a);
}
}
};
TokenStream source = new TeeTokenFilter(new WhitespaceTokenizer(new StringReader(buffer1.toString())), sink1);
int i = 0;
- final Token reusableToken = new Token();
- for (Token nextToken = source.next(reusableToken); nextToken != null; nextToken = source.next(reusableToken)) {
- assertTrue(nextToken.term() + " is not equal to " + tokens1[i], nextToken.term().equals(tokens1[i]) == true);
+ TermAttribute termAtt = (TermAttribute) source.getAttribute(TermAttribute.class);
+ while (source.incrementToken()) {
+ assertTrue(termAtt.term() + " is not equal to " + tokens1[i], termAtt.term().equals(tokens1[i]) == true);
i++;
}
assertTrue(i + " does not equal: " + tokens1.length, i == tokens1.length);
assertTrue("sink1 Size: " + sink1.getTokens().size() + " is not: " + 2, sink1.getTokens().size() == 2);
i = 0;
- for (Token token = sink1.next(reusableToken); token != null; token = sink1.next(reusableToken)) {
- assertTrue(token.term() + " is not equal to " + "The", token.term().equalsIgnoreCase("The") == true);
+ termAtt = (TermAttribute) sink1.getAttribute(TermAttribute.class);
+ while (sink1.incrementToken()) {
+ assertTrue(termAtt.term() + " is not equal to " + "The", termAtt.term().equalsIgnoreCase("The") == true);
i++;
}
assertTrue(i + " does not equal: " + sink1.getTokens().size(), i == sink1.getTokens().size());
@@ -87,55 +96,67 @@
public void testMultipleSources() throws Exception {
SinkTokenizer theDetector = new SinkTokenizer(null) {
- public void add(Token t) {
- if (t != null && t.term().equalsIgnoreCase("The")) {
- super.add(t);
+ public void add(AttributeSource a) throws IOException {
+ TermAttribute termAtt = null;
+ if (a.hasAttribute(TermAttribute.class)) {
+ termAtt = (TermAttribute) a.getAttribute(TermAttribute.class);
+ }
+ if (termAtt != null && termAtt.term().equalsIgnoreCase("The")) {
+ super.add(a);
}
}
};
- SinkTokenizer dogDetector = new SinkTokenizer(null) {
- public void add(Token t) {
- if (t != null && t.term().equalsIgnoreCase("Dogs")) {
- super.add(t);
+ SinkTokenizer dogDetector = new SinkTokenizer(null) {
+ public void add(AttributeSource a) throws IOException {
+ TermAttribute termAtt = null;
+ if (a.hasAttribute(TermAttribute.class)) {
+ termAtt = (TermAttribute) a.getAttribute(TermAttribute.class);
+ }
+ if (termAtt != null && termAtt.term().equalsIgnoreCase("Dogs")) {
+ super.add(a);
}
}
};
TokenStream source1 = new CachingTokenFilter(new TeeTokenFilter(new TeeTokenFilter(new WhitespaceTokenizer(new StringReader(buffer1.toString())), theDetector), dogDetector));
TokenStream source2 = new TeeTokenFilter(new TeeTokenFilter(new WhitespaceTokenizer(new StringReader(buffer2.toString())), theDetector), dogDetector);
int i = 0;
- final Token reusableToken = new Token();
- for (Token nextToken = source1.next(reusableToken); nextToken != null; nextToken = source1.next(reusableToken)) {
- assertTrue(nextToken.term() + " is not equal to " + tokens1[i], nextToken.term().equals(tokens1[i]) == true);
+ TermAttribute termAtt = (TermAttribute) source1.getAttribute(TermAttribute.class);
+ while (source1.incrementToken()) {
+ assertTrue(termAtt.term() + " is not equal to " + tokens1[i], termAtt.term().equals(tokens1[i]) == true);
i++;
}
assertTrue(i + " does not equal: " + tokens1.length, i == tokens1.length);
assertTrue("theDetector Size: " + theDetector.getTokens().size() + " is not: " + 2, theDetector.getTokens().size() == 2);
assertTrue("dogDetector Size: " + dogDetector.getTokens().size() + " is not: " + 1, dogDetector.getTokens().size() == 1);
i = 0;
- for (Token nextToken = source2.next(reusableToken); nextToken != null; nextToken = source2.next(reusableToken)) {
- assertTrue(nextToken.term() + " is not equal to " + tokens2[i], nextToken.term().equals(tokens2[i]) == true);
+ termAtt = (TermAttribute) source2.getAttribute(TermAttribute.class);
+ while (source2.incrementToken()) {
+ assertTrue(termAtt.term() + " is not equal to " + tokens2[i], termAtt.term().equals(tokens2[i]) == true);
i++;
}
assertTrue(i + " does not equal: " + tokens2.length, i == tokens2.length);
assertTrue("theDetector Size: " + theDetector.getTokens().size() + " is not: " + 4, theDetector.getTokens().size() == 4);
assertTrue("dogDetector Size: " + dogDetector.getTokens().size() + " is not: " + 2, dogDetector.getTokens().size() == 2);
i = 0;
- for (Token nextToken = theDetector.next(reusableToken); nextToken != null; nextToken = theDetector.next(reusableToken)) {
- assertTrue(nextToken.term() + " is not equal to " + "The", nextToken.term().equalsIgnoreCase("The") == true);
+ termAtt = (TermAttribute) theDetector.getAttribute(TermAttribute.class);
+ while (theDetector.incrementToken()) {
+ assertTrue(termAtt.term() + " is not equal to " + "The", termAtt.term().equalsIgnoreCase("The") == true);
i++;
}
assertTrue(i + " does not equal: " + theDetector.getTokens().size(), i == theDetector.getTokens().size());
i = 0;
- for (Token nextToken = dogDetector.next(reusableToken); nextToken != null; nextToken = dogDetector.next(reusableToken)) {
- assertTrue(nextToken.term() + " is not equal to " + "Dogs", nextToken.term().equalsIgnoreCase("Dogs") == true);
+ termAtt = (TermAttribute) dogDetector.getAttribute(TermAttribute.class);
+ while (dogDetector.incrementToken()) {
+ assertTrue(termAtt.term() + " is not equal to " + "Dogs", termAtt.term().equalsIgnoreCase("Dogs") == true);
i++;
}
assertTrue(i + " does not equal: " + dogDetector.getTokens().size(), i == dogDetector.getTokens().size());
source1.reset();
TokenStream lowerCasing = new LowerCaseFilter(source1);
i = 0;
- for (Token nextToken = lowerCasing.next(reusableToken); nextToken != null; nextToken = lowerCasing.next(reusableToken)) {
- assertTrue(nextToken.term() + " is not equal to " + tokens1[i].toLowerCase(), nextToken.term().equals(tokens1[i].toLowerCase()) == true);
+ termAtt = (TermAttribute) lowerCasing.getAttribute(TermAttribute.class);
+ while (lowerCasing.incrementToken()) {
+ assertTrue(termAtt.term() + " is not equal to " + tokens1[i].toLowerCase(), termAtt.term().equals(tokens1[i].toLowerCase()) == true);
i++;
}
assertTrue(i + " does not equal: " + tokens1.length, i == tokens1.length);
@@ -157,21 +178,20 @@
}
//make sure we produce the same tokens
ModuloSinkTokenizer sink = new ModuloSinkTokenizer(tokCount[k], 100);
- final Token reusableToken = new Token();
TokenStream stream = new TeeTokenFilter(new StandardFilter(new StandardTokenizer(new StringReader(buffer.toString()))), sink);
- while (stream.next(reusableToken) != null) {
+ while (stream.incrementToken()) {
}
stream = new ModuloTokenFilter(new StandardFilter(new StandardTokenizer(new StringReader(buffer.toString()))), 100);
List tmp = new ArrayList();
- for (Token nextToken = stream.next(reusableToken); nextToken != null; nextToken = stream.next(reusableToken)) {
- tmp.add(nextToken.clone());
+ while (stream.incrementToken()) {
+ tmp.add(stream.captureState());
}
List sinkList = sink.getTokens();
assertTrue("tmp Size: " + tmp.size() + " is not: " + sinkList.size(), tmp.size() == sinkList.size());
for (int i = 0; i < tmp.size(); i++) {
- Token tfTok = (Token) tmp.get(i);
- Token sinkTok = (Token) sinkList.get(i);
- assertTrue(tfTok.term() + " is not equal to " + sinkTok.term() + " at token: " + i, tfTok.term().equals(sinkTok.term()) == true);
+ AttributeSource tfTok = (AttributeSource) tmp.get(i);
+ AttributeSource sinkTok = (AttributeSource) sinkList.get(i);
+ assertTrue(tfTok + " is not equal to " + sinkTok + " at token: " + i, tfTok.equals(sinkTok) == true);
}
//simulate two fields, each being analyzed once, for 20 documents
@@ -180,12 +200,14 @@
long start = System.currentTimeMillis();
for (int i = 0; i < 20; i++) {
stream = new StandardFilter(new StandardTokenizer(new StringReader(buffer.toString())));
- for (Token nextToken = stream.next(reusableToken); nextToken != null; nextToken = stream.next(reusableToken)) {
- tfPos += nextToken.getPositionIncrement();
+ PositionIncrementAttribute posIncrAtt = (PositionIncrementAttribute) stream.getAttribute(PositionIncrementAttribute.class);
+ while (stream.incrementToken()) {
+ tfPos += posIncrAtt.getPositionIncrement();
}
stream = new ModuloTokenFilter(new StandardFilter(new StandardTokenizer(new StringReader(buffer.toString()))), modCounts[j]);
- for (Token nextToken = stream.next(reusableToken); nextToken != null; nextToken = stream.next(reusableToken)) {
- tfPos += nextToken.getPositionIncrement();
+ posIncrAtt = (PositionIncrementAttribute) stream.getAttribute(PositionIncrementAttribute.class);
+ while (stream.incrementToken()) {
+ tfPos += posIncrAtt.getPositionIncrement();
}
}
long finish = System.currentTimeMillis();
@@ -196,13 +218,15 @@
for (int i = 0; i < 20; i++) {
sink = new ModuloSinkTokenizer(tokCount[k], modCounts[j]);
stream = new TeeTokenFilter(new StandardFilter(new StandardTokenizer(new StringReader(buffer.toString()))), sink);
- for (Token nextToken = stream.next(reusableToken); nextToken != null; nextToken = stream.next(reusableToken)) {
- sinkPos += nextToken.getPositionIncrement();
+ PositionIncrementAttribute posIncrAtt = (PositionIncrementAttribute) stream.getAttribute(PositionIncrementAttribute.class);
+ while (stream.incrementToken()) {
+ sinkPos += posIncrAtt.getPositionIncrement();
}
//System.out.println("Modulo--------");
stream = sink;
- for (Token nextToken = stream.next(reusableToken); nextToken != null; nextToken = stream.next(reusableToken)) {
- sinkPos += nextToken.getPositionIncrement();
+ posIncrAtt = (PositionIncrementAttribute) stream.getAttribute(PositionIncrementAttribute.class);
+ while (stream.incrementToken()) {
+ sinkPos += posIncrAtt.getPositionIncrement();
}
}
finish = System.currentTimeMillis();
@@ -228,15 +252,15 @@
int count = 0;
//return every 100 tokens
- public Token next(final Token reusableToken) throws IOException {
- Token nextToken = null;
- for (nextToken = input.next(reusableToken);
- nextToken != null && count % modCount != 0;
- nextToken = input.next(reusableToken)) {
+ public boolean incrementToken() throws IOException {
+ boolean hasNext;
+ for (hasNext = input.incrementToken();
+ hasNext && count % modCount != 0;
+ hasNext = input.incrementToken()) {
count++;
}
count++;
- return nextToken;
+ return hasNext;
}
}
@@ -250,9 +274,9 @@
lst = new ArrayList(numToks % mc);
}
- public void add(Token t) {
- if (t != null && count % modCount == 0) {
- super.add(t);
+ public void add(AttributeSource a) throws IOException {
+ if (a != null && count % modCount == 0) {
+ super.add(a);
}
count++;
}
Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestAnalyzers.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestAnalyzers.java?rev=718798&r1=718797&r2=718798&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestAnalyzers.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestAnalyzers.java Tue Nov 18 15:41:49 2008
@@ -19,10 +19,10 @@
import java.io.IOException;
import java.io.StringReader;
-import java.util.LinkedList;
-import java.util.List;
import org.apache.lucene.analysis.standard.StandardTokenizer;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.index.Payload;
import org.apache.lucene.util.LuceneTestCase;
@@ -36,13 +36,12 @@
String input,
String[] output) throws Exception {
TokenStream ts = a.tokenStream("dummy", new StringReader(input));
- final Token reusableToken = new Token();
+ TermAttribute termAtt = (TermAttribute) ts.getAttribute(TermAttribute.class);
for (int i=0; i<output.length; i++) {
- Token nextToken = ts.next(reusableToken);
- assertNotNull(nextToken);
- assertEquals(nextToken.term(), output[i]);
+ assertTrue(ts.incrementToken());
+ assertEquals(termAtt.term(), output[i]);
}
- assertNull(ts.next(reusableToken));
+ assertFalse(ts.incrementToken());
ts.close();
}
@@ -95,14 +94,13 @@
}
void verifyPayload(TokenStream ts) throws IOException {
- final Token reusableToken = new Token();
+ PayloadAttribute payloadAtt = (PayloadAttribute) ts.getAttribute(PayloadAttribute.class);
for(byte b=1;;b++) {
- reusableToken.clear();
- Token nextToken = ts.next(reusableToken);
- if (nextToken==null) break;
+ boolean hasNext = ts.incrementToken();
+ if (!hasNext) break;
// System.out.println("id="+System.identityHashCode(nextToken) + " " + t);
// System.out.println("payload=" + (int)nextToken.getPayload().toByteArray()[0]);
- assertEquals(b, nextToken.getPayload().toByteArray()[0]);
+ assertEquals(b, payloadAtt.getPayload().toByteArray()[0]);
}
}
@@ -111,13 +109,11 @@
String s = "how now brown cow";
TokenStream ts;
ts = new WhitespaceTokenizer(new StringReader(s));
- ts = new BuffTokenFilter(ts);
ts = new PayloadSetter(ts);
verifyPayload(ts);
ts = new WhitespaceTokenizer(new StringReader(s));
ts = new PayloadSetter(ts);
- ts = new BuffTokenFilter(ts);
verifyPayload(ts);
}
@@ -136,38 +132,21 @@
}
}
-class BuffTokenFilter extends TokenFilter {
- List lst;
-
- public BuffTokenFilter(TokenStream input) {
- super(input);
- }
-
- public Token next(final Token reusableToken) throws IOException {
- if (lst == null) {
- lst = new LinkedList();
- for(Token nextToken = input.next(reusableToken); nextToken != null; nextToken = input.next(reusableToken)) {
- lst.add(nextToken.clone());
- }
- }
- return lst.size()==0 ? null : (Token)lst.remove(0);
- }
-}
-
class PayloadSetter extends TokenFilter {
+ PayloadAttribute payloadAtt;
public PayloadSetter(TokenStream input) {
super(input);
+ payloadAtt = (PayloadAttribute) addAttribute(PayloadAttribute.class);
}
byte[] data = new byte[1];
Payload p = new Payload(data,0,1);
- public Token next(final Token reusableToken) throws IOException {
- assert reusableToken != null;
- Token nextToken = input.next(reusableToken);
- if (nextToken==null) return null;
- nextToken.setPayload(p); // reuse the payload / byte[]
+ public boolean incrementToken() throws IOException {
+ boolean hasNext = input.incrementToken();
+ if (!hasNext) return false;
+ payloadAtt.setPayload(p); // reuse the payload / byte[]
data[0]++;
- return nextToken;
+ return true;
}
}
\ No newline at end of file
Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java?rev=718798&r1=718797&r2=718798&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java Tue Nov 18 15:41:49 2008
@@ -22,6 +22,8 @@
import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.TermVector;
@@ -41,13 +43,17 @@
Document doc = new Document();
TokenStream stream = new TokenStream() {
private int index = 0;
+ private TermAttribute termAtt = (TermAttribute) addAttribute(TermAttribute.class);
+ private OffsetAttribute offsetAtt = (OffsetAttribute) addAttribute(OffsetAttribute.class);
- public Token next(final Token reusableToken) throws IOException {
- assert reusableToken != null;
+ public boolean incrementToken() throws IOException {
if (index == tokens.length) {
- return null;
+ return false;
} else {
- return reusableToken.reinit(tokens[index++], 0, 0);
+ termAtt.setTermBuffer(tokens[index++]);
+ offsetAtt.setStartOffset(0);
+ offsetAtt.setEndOffset(0);
+ return true;
}
}
@@ -92,10 +98,12 @@
private void checkTokens(TokenStream stream) throws IOException {
int count = 0;
- final Token reusableToken = new Token();
- for (Token nextToken = stream.next(reusableToken); nextToken != null; nextToken = stream.next(reusableToken)) {
+
+ TermAttribute termAtt = (TermAttribute) stream.getAttribute(TermAttribute.class);
+ assertNotNull(termAtt);
+ while (stream.incrementToken()) {
assertTrue(count < tokens.length);
- assertEquals(tokens[count], nextToken.term());
+ assertEquals(tokens[count], termAtt.term());
count++;
}
Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java?rev=718798&r1=718797&r2=718798&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java Tue Nov 18 15:41:49 2008
@@ -17,6 +17,7 @@
* limitations under the License.
*/
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.LuceneTestCase;
import java.io.StringReader;
@@ -25,82 +26,87 @@
public void testU() throws Exception {
TokenStream stream = new WhitespaceTokenizer(new StringReader("Des mot clés à LA CHAÎNE À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï Ĳ Ð Ñ Ò Ó Ô Õ Ö Ø Œ Þ Ù Ú Û Ü Ý Ÿ à á â ã ä å æ ç è é ê ë ì í î ï ĳ ð ñ ò ó ô õ ö ø œ ß þ ù ú û ü ý ÿ ﬁ ﬂ"));
ISOLatin1AccentFilter filter = new ISOLatin1AccentFilter(stream);
- final Token reusableToken = new Token();
- assertEquals("Des", filter.next(reusableToken).term());
- assertEquals("mot", filter.next(reusableToken).term());
- assertEquals("cles", filter.next(reusableToken).term());
- assertEquals("A", filter.next(reusableToken).term());
- assertEquals("LA", filter.next(reusableToken).term());
- assertEquals("CHAINE", filter.next(reusableToken).term());
- assertEquals("A", filter.next(reusableToken).term());
- assertEquals("A", filter.next(reusableToken).term());
- assertEquals("A", filter.next(reusableToken).term());
- assertEquals("A", filter.next(reusableToken).term());
- assertEquals("A", filter.next(reusableToken).term());
- assertEquals("A", filter.next(reusableToken).term());
- assertEquals("AE", filter.next(reusableToken).term());
- assertEquals("C", filter.next(reusableToken).term());
- assertEquals("E", filter.next(reusableToken).term());
- assertEquals("E", filter.next(reusableToken).term());
- assertEquals("E", filter.next(reusableToken).term());
- assertEquals("E", filter.next(reusableToken).term());
- assertEquals("I", filter.next(reusableToken).term());
- assertEquals("I", filter.next(reusableToken).term());
- assertEquals("I", filter.next(reusableToken).term());
- assertEquals("I", filter.next(reusableToken).term());
- assertEquals("IJ", filter.next(reusableToken).term());
- assertEquals("D", filter.next(reusableToken).term());
- assertEquals("N", filter.next(reusableToken).term());
- assertEquals("O", filter.next(reusableToken).term());
- assertEquals("O", filter.next(reusableToken).term());
- assertEquals("O", filter.next(reusableToken).term());
- assertEquals("O", filter.next(reusableToken).term());
- assertEquals("O", filter.next(reusableToken).term());
- assertEquals("O", filter.next(reusableToken).term());
- assertEquals("OE", filter.next(reusableToken).term());
- assertEquals("TH", filter.next(reusableToken).term());
- assertEquals("U", filter.next(reusableToken).term());
- assertEquals("U", filter.next(reusableToken).term());
- assertEquals("U", filter.next(reusableToken).term());
- assertEquals("U", filter.next(reusableToken).term());
- assertEquals("Y", filter.next(reusableToken).term());
- assertEquals("Y", filter.next(reusableToken).term());
- assertEquals("a", filter.next(reusableToken).term());
- assertEquals("a", filter.next(reusableToken).term());
- assertEquals("a", filter.next(reusableToken).term());
- assertEquals("a", filter.next(reusableToken).term());
- assertEquals("a", filter.next(reusableToken).term());
- assertEquals("a", filter.next(reusableToken).term());
- assertEquals("ae", filter.next(reusableToken).term());
- assertEquals("c", filter.next(reusableToken).term());
- assertEquals("e", filter.next(reusableToken).term());
- assertEquals("e", filter.next(reusableToken).term());
- assertEquals("e", filter.next(reusableToken).term());
- assertEquals("e", filter.next(reusableToken).term());
- assertEquals("i", filter.next(reusableToken).term());
- assertEquals("i", filter.next(reusableToken).term());
- assertEquals("i", filter.next(reusableToken).term());
- assertEquals("i", filter.next(reusableToken).term());
- assertEquals("ij", filter.next(reusableToken).term());
- assertEquals("d", filter.next(reusableToken).term());
- assertEquals("n", filter.next(reusableToken).term());
- assertEquals("o", filter.next(reusableToken).term());
- assertEquals("o", filter.next(reusableToken).term());
- assertEquals("o", filter.next(reusableToken).term());
- assertEquals("o", filter.next(reusableToken).term());
- assertEquals("o", filter.next(reusableToken).term());
- assertEquals("o", filter.next(reusableToken).term());
- assertEquals("oe", filter.next(reusableToken).term());
- assertEquals("ss", filter.next(reusableToken).term());
- assertEquals("th", filter.next(reusableToken).term());
- assertEquals("u", filter.next(reusableToken).term());
- assertEquals("u", filter.next(reusableToken).term());
- assertEquals("u", filter.next(reusableToken).term());
- assertEquals("u", filter.next(reusableToken).term());
- assertEquals("y", filter.next(reusableToken).term());
- assertEquals("y", filter.next(reusableToken).term());
- assertEquals("fi", filter.next(reusableToken).term());
- assertEquals("fl", filter.next(reusableToken).term());
- assertNull(filter.next(reusableToken));
+ TermAttribute termAtt = (TermAttribute) filter.getAttribute(TermAttribute.class);
+ assertTermEquals("Des", filter, termAtt);
+ assertTermEquals("mot", filter, termAtt);
+ assertTermEquals("cles", filter, termAtt);
+ assertTermEquals("A", filter, termAtt);
+ assertTermEquals("LA", filter, termAtt);
+ assertTermEquals("CHAINE", filter, termAtt);
+ assertTermEquals("A", filter, termAtt);
+ assertTermEquals("A", filter, termAtt);
+ assertTermEquals("A", filter, termAtt);
+ assertTermEquals("A", filter, termAtt);
+ assertTermEquals("A", filter, termAtt);
+ assertTermEquals("A", filter, termAtt);
+ assertTermEquals("AE", filter, termAtt);
+ assertTermEquals("C", filter, termAtt);
+ assertTermEquals("E", filter, termAtt);
+ assertTermEquals("E", filter, termAtt);
+ assertTermEquals("E", filter, termAtt);
+ assertTermEquals("E", filter, termAtt);
+ assertTermEquals("I", filter, termAtt);
+ assertTermEquals("I", filter, termAtt);
+ assertTermEquals("I", filter, termAtt);
+ assertTermEquals("I", filter, termAtt);
+ assertTermEquals("IJ", filter, termAtt);
+ assertTermEquals("D", filter, termAtt);
+ assertTermEquals("N", filter, termAtt);
+ assertTermEquals("O", filter, termAtt);
+ assertTermEquals("O", filter, termAtt);
+ assertTermEquals("O", filter, termAtt);
+ assertTermEquals("O", filter, termAtt);
+ assertTermEquals("O", filter, termAtt);
+ assertTermEquals("O", filter, termAtt);
+ assertTermEquals("OE", filter, termAtt);
+ assertTermEquals("TH", filter, termAtt);
+ assertTermEquals("U", filter, termAtt);
+ assertTermEquals("U", filter, termAtt);
+ assertTermEquals("U", filter, termAtt);
+ assertTermEquals("U", filter, termAtt);
+ assertTermEquals("Y", filter, termAtt);
+ assertTermEquals("Y", filter, termAtt);
+ assertTermEquals("a", filter, termAtt);
+ assertTermEquals("a", filter, termAtt);
+ assertTermEquals("a", filter, termAtt);
+ assertTermEquals("a", filter, termAtt);
+ assertTermEquals("a", filter, termAtt);
+ assertTermEquals("a", filter, termAtt);
+ assertTermEquals("ae", filter, termAtt);
+ assertTermEquals("c", filter, termAtt);
+ assertTermEquals("e", filter, termAtt);
+ assertTermEquals("e", filter, termAtt);
+ assertTermEquals("e", filter, termAtt);
+ assertTermEquals("e", filter, termAtt);
+ assertTermEquals("i", filter, termAtt);
+ assertTermEquals("i", filter, termAtt);
+ assertTermEquals("i", filter, termAtt);
+ assertTermEquals("i", filter, termAtt);
+ assertTermEquals("ij", filter, termAtt);
+ assertTermEquals("d", filter, termAtt);
+ assertTermEquals("n", filter, termAtt);
+ assertTermEquals("o", filter, termAtt);
+ assertTermEquals("o", filter, termAtt);
+ assertTermEquals("o", filter, termAtt);
+ assertTermEquals("o", filter, termAtt);
+ assertTermEquals("o", filter, termAtt);
+ assertTermEquals("o", filter, termAtt);
+ assertTermEquals("oe", filter, termAtt);
+ assertTermEquals("ss", filter, termAtt);
+ assertTermEquals("th", filter, termAtt);
+ assertTermEquals("u", filter, termAtt);
+ assertTermEquals("u", filter, termAtt);
+ assertTermEquals("u", filter, termAtt);
+ assertTermEquals("u", filter, termAtt);
+ assertTermEquals("y", filter, termAtt);
+ assertTermEquals("y", filter, termAtt);
+ assertTermEquals("fi", filter, termAtt);
+ assertTermEquals("fl", filter, termAtt);
+ assertFalse(filter.incrementToken());
+ }
+
+ void assertTermEquals(String expected, TokenStream stream, TermAttribute termAtt) throws Exception {
+ assertTrue(stream.incrementToken());
+ assertEquals(expected, termAtt.term());
}
}
Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java?rev=718798&r1=718797&r2=718798&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java Tue Nov 18 15:41:49 2008
@@ -19,6 +19,7 @@
import java.io.StringReader;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
@@ -88,9 +89,9 @@
// LUCENE-1441
public void testOffsets() throws Exception {
TokenStream stream = new KeywordAnalyzer().tokenStream("field", new StringReader("abcd"));
- Token token = new Token();
- assertTrue(stream.next(token) != null);
- assertEquals(0, token.startOffset);
- assertEquals(4, token.endOffset);
+ OffsetAttribute offsetAtt = (OffsetAttribute) stream.addAttribute(OffsetAttribute.class);
+ assertTrue(stream.incrementToken());
+ assertEquals(0, offsetAtt.startOffset());
+ assertEquals(4, offsetAtt.endOffset());
}
}
Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestLengthFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestLengthFilter.java?rev=718798&r1=718797&r2=718798&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestLengthFilter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestLengthFilter.java Tue Nov 18 15:41:49 2008
@@ -17,6 +17,7 @@
* limitations under the License.
*/
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.LuceneTestCase;
import java.io.StringReader;
@@ -27,11 +28,15 @@
TokenStream stream = new WhitespaceTokenizer(
new StringReader("short toolong evenmuchlongertext a ab toolong foo"));
LengthFilter filter = new LengthFilter(stream, 2, 6);
- final Token reusableToken = new Token();
- assertEquals("short", filter.next(reusableToken).term());
- assertEquals("ab", filter.next(reusableToken).term());
- assertEquals("foo", filter.next(reusableToken).term());
- assertNull(filter.next(reusableToken));
+ TermAttribute termAtt = (TermAttribute) filter.getAttribute(TermAttribute.class);
+
+ assertTrue(filter.incrementToken());
+ assertEquals("short", termAtt.term());
+ assertTrue(filter.incrementToken());
+ assertEquals("ab", termAtt.term());
+ assertTrue(filter.incrementToken());
+ assertEquals("foo", termAtt.term());
+ assertFalse(filter.incrementToken());
}
}
Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestPerFieldAnalzyerWrapper.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestPerFieldAnalzyerWrapper.java?rev=718798&r1=718797&r2=718798&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestPerFieldAnalzyerWrapper.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestPerFieldAnalzyerWrapper.java Tue Nov 18 15:41:49 2008
@@ -1,8 +1,10 @@
package org.apache.lucene.analysis;
-import org.apache.lucene.util.LuceneTestCase;
import java.io.StringReader;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.util.LuceneTestCase;
+
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@@ -29,17 +31,19 @@
TokenStream tokenStream = analyzer.tokenStream("field",
new StringReader(text));
- final Token reusableToken = new Token();
- Token nextToken = tokenStream.next(reusableToken);
+ TermAttribute termAtt = (TermAttribute) tokenStream.getAttribute(TermAttribute.class);
+
+ assertTrue(tokenStream.incrementToken());
assertEquals("WhitespaceAnalyzer does not lowercase",
"Qwerty",
- nextToken.term());
+ termAtt.term());
tokenStream = analyzer.tokenStream("special",
new StringReader(text));
- nextToken = tokenStream.next(reusableToken);
+ termAtt = (TermAttribute) tokenStream.getAttribute(TermAttribute.class);
+ assertTrue(tokenStream.incrementToken());
assertEquals("SimpleAnalyzer lowercases",
"qwerty",
- nextToken.term());
+ termAtt.term());
}
}
Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java?rev=718798&r1=718797&r2=718798&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java Tue Nov 18 15:41:49 2008
@@ -1,6 +1,10 @@
package org.apache.lucene.analysis;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.util.LuceneTestCase;
import java.io.StringReader;
@@ -35,19 +39,25 @@
public void assertAnalyzesTo(Analyzer a, String input, String[] expectedImages, String[] expectedTypes, int[] expectedPosIncrs) throws Exception {
TokenStream ts = a.tokenStream("dummy", new StringReader(input));
- final Token reusableToken = new Token();
+ // TODO Java 1.5
+ //final TypeAttribute typeAtt = reusableToken.getAttribute(TypeAttribute.class);
+ //final PositionIncrementAttribute posIncrAtt = reusableToken.getAttribute(PositionIncrementAttribute.class);
+
+ final TermAttribute termAtt = (TermAttribute) ts.getAttribute(TermAttribute.class);
+ final TypeAttribute typeAtt = (TypeAttribute) ts.getAttribute(TypeAttribute.class);
+ final PositionIncrementAttribute posIncrAtt = (PositionIncrementAttribute) ts.getAttribute(PositionIncrementAttribute.class);
+
for (int i = 0; i < expectedImages.length; i++) {
- Token nextToken = ts.next(reusableToken);
- assertNotNull(nextToken);
- assertEquals(expectedImages[i], nextToken.term());
+ assertTrue(ts.incrementToken());
+ assertEquals(expectedImages[i], new String(termAtt.termBuffer(), 0, termAtt.termLength()));
if (expectedTypes != null) {
- assertEquals(expectedTypes[i], nextToken.type());
+ assertEquals(expectedTypes[i], typeAtt.type());
}
if (expectedPosIncrs != null) {
- assertEquals(expectedPosIncrs[i], nextToken.getPositionIncrement());
+ assertEquals(expectedPosIncrs[i], posIncrAtt.getPositionIncrement());
}
}
- assertNull(ts.next(reusableToken));
+ assertFalse(ts.incrementToken());
ts.close();
}
Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopAnalyzer.java?rev=718798&r1=718797&r2=718798&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopAnalyzer.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopAnalyzer.java Tue Nov 18 15:41:49 2008
@@ -17,6 +17,8 @@
* limitations under the License.
*/
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.LuceneTestCase;
import java.io.StringReader;
@@ -45,9 +47,10 @@
StringReader reader = new StringReader("This is a test of the english stop analyzer");
TokenStream stream = stop.tokenStream("test", reader);
assertTrue(stream != null);
- final Token reusableToken = new Token();
- for (Token nextToken = stream.next(reusableToken); nextToken != null; nextToken = stream.next(reusableToken)) {
- assertFalse(inValidTokens.contains(nextToken.term()));
+ TermAttribute termAtt = (TermAttribute) stream.getAttribute(TermAttribute.class);
+
+ while (stream.incrementToken()) {
+ assertFalse(inValidTokens.contains(termAtt.term()));
}
}
@@ -60,11 +63,13 @@
StringReader reader = new StringReader("This is a good test of the english stop analyzer");
TokenStream stream = newStop.tokenStream("test", reader);
assertNotNull(stream);
- final Token reusableToken = new Token();
- for (Token nextToken = stream.next(reusableToken); nextToken != null; nextToken = stream.next(reusableToken)) {
- String text = nextToken.term();
+ TermAttribute termAtt = (TermAttribute) stream.getAttribute(TermAttribute.class);
+ PositionIncrementAttribute posIncrAtt = (PositionIncrementAttribute) stream.addAttribute(PositionIncrementAttribute.class);
+
+ while (stream.incrementToken()) {
+ String text = termAtt.term();
assertFalse(stopWordsSet.contains(text));
- assertEquals(1,nextToken.getPositionIncrement()); // by default stop tokenizer does not apply increments.
+ assertEquals(1,posIncrAtt.getPositionIncrement()); // by default stop tokenizer does not apply increments.
}
}
@@ -82,11 +87,13 @@
TokenStream stream = newStop.tokenStream("test", reader);
assertNotNull(stream);
int i = 0;
- final Token reusableToken = new Token();
- for (Token nextToken = stream.next(reusableToken); nextToken != null; nextToken = stream.next(reusableToken)) {
- String text = nextToken.term();
+ TermAttribute termAtt = (TermAttribute) stream.getAttribute(TermAttribute.class);
+ PositionIncrementAttribute posIncrAtt = (PositionIncrementAttribute) stream.addAttribute(PositionIncrementAttribute.class);
+
+ while (stream.incrementToken()) {
+ String text = termAtt.term();
assertFalse(stopWordsSet.contains(text));
- assertEquals(expectedIncr[i++],nextToken.getPositionIncrement());
+ assertEquals(expectedIncr[i++],posIncrAtt.getPositionIncrement());
}
} finally {
StopFilter.setEnablePositionIncrementsDefault(defaultEnable);
Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopFilter.java?rev=718798&r1=718797&r2=718798&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopFilter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopFilter.java Tue Nov 18 15:41:49 2008
@@ -16,6 +16,8 @@
* limitations under the License.
*/
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.English;
import org.apache.lucene.util.LuceneTestCase;
@@ -35,19 +37,22 @@
StringReader reader = new StringReader("Now is The Time");
String[] stopWords = new String[] { "is", "the", "Time" };
TokenStream stream = new StopFilter(new WhitespaceTokenizer(reader), stopWords);
- final Token reusableToken = new Token();
- assertEquals("Now", stream.next(reusableToken).term());
- assertEquals("The", stream.next(reusableToken).term());
- assertEquals(null, stream.next(reusableToken));
+ final TermAttribute termAtt = (TermAttribute) stream.getAttribute(TermAttribute.class);
+ assertTrue(stream.incrementToken());
+ assertEquals("Now", termAtt.term());
+ assertTrue(stream.incrementToken());
+ assertEquals("The", termAtt.term());
+ assertFalse(stream.incrementToken());
}
public void testIgnoreCase() throws IOException {
StringReader reader = new StringReader("Now is The Time");
String[] stopWords = new String[] { "is", "the", "Time" };
TokenStream stream = new StopFilter(new WhitespaceTokenizer(reader), stopWords, true);
- final Token reusableToken = new Token();
- assertEquals("Now", stream.next(reusableToken).term());
- assertEquals(null,stream.next(reusableToken));
+ final TermAttribute termAtt = (TermAttribute) stream.getAttribute(TermAttribute.class);
+ assertTrue(stream.incrementToken());
+ assertEquals("Now", termAtt.term());
+ assertFalse(stream.incrementToken());
}
public void testStopFilt() throws IOException {
@@ -55,10 +60,12 @@
String[] stopWords = new String[] { "is", "the", "Time" };
Set stopSet = StopFilter.makeStopSet(stopWords);
TokenStream stream = new StopFilter(new WhitespaceTokenizer(reader), stopSet);
- final Token reusableToken = new Token();
- assertEquals("Now", stream.next(reusableToken).term());
- assertEquals("The", stream.next(reusableToken).term());
- assertEquals(null, stream.next(reusableToken));
+ final TermAttribute termAtt = (TermAttribute) stream.getAttribute(TermAttribute.class);
+ assertTrue(stream.incrementToken());
+ assertEquals("Now", termAtt.term());
+ assertTrue(stream.incrementToken());
+ assertEquals("The", termAtt.term());
+ assertFalse(stream.incrementToken());
}
/**
@@ -110,15 +117,16 @@
private void doTestStopPositons(StopFilter stpf, boolean enableIcrements) throws IOException {
log("---> test with enable-increments-"+(enableIcrements?"enabled":"disabled"));
stpf.setEnablePositionIncrements(enableIcrements);
- final Token reusableToken = new Token();
+ TermAttribute termAtt = (TermAttribute) stpf.getAttribute(TermAttribute.class);
+ PositionIncrementAttribute posIncrAtt = (PositionIncrementAttribute) stpf.getAttribute(PositionIncrementAttribute.class);
for (int i=0; i<20; i+=3) {
- Token nextToken = stpf.next(reusableToken);
- log("Token "+i+": "+nextToken);
+ assertTrue(stpf.incrementToken());
+ log("Token "+i+": "+stpf);
String w = English.intToEnglish(i).trim();
- assertEquals("expecting token "+i+" to be "+w,w,nextToken.term());
- assertEquals("all but first token must have position increment of 3",enableIcrements?(i==0?1:3):1,nextToken.getPositionIncrement());
+ assertEquals("expecting token "+i+" to be "+w,w,termAtt.term());
+ assertEquals("all but first token must have position increment of 3",enableIcrements?(i==0?1:3):1,posIncrAtt.getPositionIncrement());
}
- assertNull(stpf.next(reusableToken));
+ assertFalse(stpf.incrementToken());
}
// print debug info depending on VERBOSE
Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestToken.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestToken.java?rev=718798&r1=718797&r2=718798&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestToken.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestToken.java Tue Nov 18 15:41:49 2008
@@ -19,6 +19,7 @@
import org.apache.lucene.util.LuceneTestCase;
+/** @deprecated */
public class TestToken extends LuceneTestCase {
public TestToken(String name) {
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestDocumentWriter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestDocumentWriter.java?rev=718798&r1=718797&r2=718798&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestDocumentWriter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestDocumentWriter.java Tue Nov 18 15:41:49 2008
@@ -22,12 +22,14 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.SimpleAnalyzer;
-import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
@@ -35,6 +37,7 @@
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field.TermVector;
import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
@@ -138,33 +141,38 @@
public TokenStream tokenStream(String fieldName, Reader reader) {
return new TokenFilter(new WhitespaceTokenizer(reader)) {
boolean first=true;
- Token buffered;
+ AttributeSource state;
- public Token next(final Token reusableToken) throws IOException {
- if (buffered != null) {
- Token nextToken = buffered;
- buffered=null;
- return nextToken;
+ public boolean incrementToken() throws IOException {
+ if (state != null) {
+ state.restoreState(this);
+ payloadAtt.setPayload(null);
+ posIncrAtt.setPositionIncrement(0);
+ termAtt.setTermBuffer(new char[]{'b'}, 0, 1);
+ state = null;
+ return true;
}
- Token nextToken = input.next(reusableToken);
- if (nextToken==null) return null;
- if (Character.isDigit(nextToken.termBuffer()[0])) {
- nextToken.setPositionIncrement(nextToken.termBuffer()[0] - '0');
+
+ boolean hasNext = input.incrementToken();
+ if (!hasNext) return false;
+ if (Character.isDigit(termAtt.termBuffer()[0])) {
+ posIncrAtt.setPositionIncrement(termAtt.termBuffer()[0] - '0');
}
if (first) {
// set payload on first position only
- nextToken.setPayload(new Payload(new byte[]{100}));
+ payloadAtt.setPayload(new Payload(new byte[]{100}));
first = false;
}
// index a "synonym" for every token
- buffered = (Token)nextToken.clone();
- buffered.setPayload(null);
- buffered.setPositionIncrement(0);
- buffered.setTermBuffer(new char[]{'b'}, 0, 1);
+ state = captureState();
+ return true;
- return nextToken;
}
+
+ TermAttribute termAtt = (TermAttribute) addAttribute(TermAttribute.class);
+ PayloadAttribute payloadAtt = (PayloadAttribute) addAttribute(PayloadAttribute.class);
+ PositionIncrementAttribute posIncrAtt = (PositionIncrementAttribute) addAttribute(PositionIncrementAttribute.class);
};
}
};
@@ -201,12 +209,14 @@
private String[] tokens = new String[] {"term1", "term2", "term3", "term2"};
private int index = 0;
- public Token next(final Token reusableToken) throws IOException {
- assert reusableToken != null;
+ private TermAttribute termAtt = (TermAttribute) addAttribute(TermAttribute.class);
+
+ public boolean incrementToken() throws IOException {
if (index == tokens.length) {
- return null;
+ return false;
} else {
- return reusableToken.reinit(tokens[index++], 0, 0);
+ termAtt.setTermBuffer(tokens[index++]);
+ return true;
}
}
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java?rev=718798&r1=718797&r2=718798&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriter.java Tue Nov 18 15:41:49 2008
@@ -17,48 +17,48 @@
* limitations under the License.
*/
-import java.io.IOException;
-import java.io.Reader;
-import java.io.File;
import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.IOException;
import java.io.PrintStream;
-import java.util.Arrays;
+import java.io.Reader;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.List;
import java.util.Random;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.UnicodeUtil;
-
-import org.apache.lucene.analysis.WhitespaceAnalyzer;
-import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.SinkTokenizer;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.standard.StandardTokenizer;
-import org.apache.lucene.analysis.Token;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.PhraseQuery;
+import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.Query;
import org.apache.lucene.search.spans.SpanTermQuery;
-import org.apache.lucene.search.PhraseQuery;
+import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
-import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.store.AlreadyClosedException;
-import org.apache.lucene.util._TestUtil;
-
-import org.apache.lucene.store.MockRAMDirectory;
-import org.apache.lucene.store.LockFactory;
import org.apache.lucene.store.Lock;
+import org.apache.lucene.store.LockFactory;
+import org.apache.lucene.store.MockRAMDirectory;
+import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.SingleInstanceLockFactory;
+import org.apache.lucene.util.AttributeSource;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.UnicodeUtil;
+import org.apache.lucene.util._TestUtil;
/**
*
@@ -1793,11 +1793,11 @@
return new TokenFilter(new StandardTokenizer(reader)) {
private int count = 0;
- public Token next(final Token reusableToken) throws IOException {
+ public boolean incrementToken() throws IOException {
if (count++ == 5) {
throw new IOException();
}
- return input.next(reusableToken);
+ return input.incrementToken();
}
};
}
@@ -1916,10 +1916,10 @@
this.fieldName = fieldName;
}
- public Token next(final Token reusableToken) throws IOException {
+ public boolean incrementToken() throws IOException {
if (this.fieldName.equals("crash") && count++ >= 4)
throw new IOException("I'm experiencing problems");
- return input.next(reusableToken);
+ return input.incrementToken();
}
public void reset() throws IOException {
@@ -3577,21 +3577,47 @@
}
}
+ private static class MyAnalyzer extends Analyzer {
+
+ public TokenStream tokenStream(String fieldName, Reader reader) {
+ TokenStream s = new WhitespaceTokenizer(reader);
+ s.addAttribute(PositionIncrementAttribute.class);
+ return s;
+ }
+
+ }
+
// LUCENE-1255
public void testNegativePositions() throws Throwable {
SinkTokenizer tokens = new SinkTokenizer();
- Token t = new Token();
- t.setTermBuffer("a");
- t.setPositionIncrement(0);
- tokens.add(t);
- t.setTermBuffer("b");
- t.setPositionIncrement(1);
- tokens.add(t);
- t.setTermBuffer("c");
- tokens.add(t);
+ tokens.addAttribute(TermAttribute.class);
+ tokens.addAttribute(PositionIncrementAttribute.class);
+
+ AttributeSource state = new AttributeSource();
+ TermAttribute termAtt = (TermAttribute) state.addAttribute(TermAttribute.class);
+ PositionIncrementAttribute posIncrAtt = (PositionIncrementAttribute) state.addAttribute(PositionIncrementAttribute.class);
+ termAtt.setTermBuffer("a");
+ posIncrAtt.setPositionIncrement(0);
+ tokens.add(state);
+
+ state = new AttributeSource();
+ termAtt = (TermAttribute) state.addAttribute(TermAttribute.class);
+ posIncrAtt = (PositionIncrementAttribute) state.addAttribute(PositionIncrementAttribute.class);
+
+ termAtt.setTermBuffer("b");
+ posIncrAtt.setPositionIncrement(1);
+ tokens.add(state);
+
+ state = new AttributeSource();
+ termAtt = (TermAttribute) state.addAttribute(TermAttribute.class);
+ posIncrAtt = (PositionIncrementAttribute) state.addAttribute(PositionIncrementAttribute.class);
+
+ termAtt.setTermBuffer("c");
+ posIncrAtt.setPositionIncrement(1);
+ tokens.add(state);
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter w = new IndexWriter(dir, new MyAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
Document doc = new Document();
doc.add(new Field("field", tokens));
w.addDocument(doc);
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java?rev=718798&r1=718797&r2=718798&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java Tue Nov 18 15:41:49 2008
@@ -20,19 +20,18 @@
import java.io.IOException;
import java.io.Reader;
-import org.apache.lucene.util.LuceneTestCase;
-
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseTokenizer;
-import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
/**
* This testcase tests whether multi-level skipping is being used
@@ -99,17 +98,19 @@
private static class PayloadFilter extends TokenFilter {
static int count = 0;
+ PayloadAttribute payloadAtt;
+
protected PayloadFilter(TokenStream input) {
super(input);
+ payloadAtt = (PayloadAttribute) addAttribute(PayloadAttribute.class);
}
- public Token next(final Token reusableToken) throws IOException {
- assert reusableToken != null;
- Token nextToken = input.next(reusableToken);
- if (nextToken != null) {
- nextToken.setPayload(new Payload(new byte[] { (byte) count++ }));
- }
- return nextToken;
+ public boolean incrementToken() throws IOException {
+ boolean hasNext = input.incrementToken();
+ if (hasNext) {
+ payloadAtt.setPayload(new Payload(new byte[] { (byte) count++ }));
+ }
+ return hasNext;
}
}
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestPayloads.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestPayloads.java?rev=718798&r1=718797&r2=718798&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestPayloads.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestPayloads.java Tue Nov 18 15:41:49 2008
@@ -27,20 +27,20 @@
import java.util.Map;
import java.util.Random;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.UnicodeUtil;
-
import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.analysis.WhitespaceTokenizer;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.UnicodeUtil;
public class TestPayloads extends LuceneTestCase {
@@ -442,32 +442,33 @@
private int length;
private int offset;
Payload payload = new Payload();
+ PayloadAttribute payloadAtt;
public PayloadFilter(TokenStream in, byte[] data, int offset, int length) {
super(in);
this.data = data;
this.length = length;
this.offset = offset;
+ payloadAtt = (PayloadAttribute) addAttribute(PayloadAttribute.class);
}
- public Token next(final Token reusableToken) throws IOException {
- assert reusableToken != null;
- Token nextToken = input.next(reusableToken);
- if (nextToken != null) {
+ public boolean incrementToken() throws IOException {
+ boolean hasNext = input.incrementToken();
+ if (hasNext) {
if (offset + length <= data.length) {
Payload p = null;
if (p == null) {
p = new Payload();
- nextToken.setPayload(p);
+ payloadAtt.setPayload(p);
}
p.setData(data, offset, length);
offset += length;
} else {
- nextToken.setPayload(null);
+ payloadAtt.setPayload(null);
}
}
- return nextToken;
+ return hasNext;
}
}
@@ -529,19 +530,25 @@
private boolean first;
private ByteArrayPool pool;
private String term;
+
+ TermAttribute termAtt;
+ PayloadAttribute payloadAtt;
+
PoolingPayloadTokenStream(ByteArrayPool pool) {
this.pool = pool;
payload = pool.get();
generateRandomData(payload);
term = pool.bytesToString(payload);
first = true;
+ payloadAtt = (PayloadAttribute) addAttribute(PayloadAttribute.class);
+ termAtt = (TermAttribute) addAttribute(TermAttribute.class);
}
- public Token next(final Token reusableToken) throws IOException {
- if (!first) return null;
- reusableToken.reinit(term, 0, 0);
- reusableToken.setPayload(new Payload(payload));
- return reusableToken;
+ public boolean incrementToken() throws IOException {
+ if (!first) return false;
+ termAtt.setTermBuffer(term);
+ payloadAtt.setPayload(new Payload(payload));
+ return true;
}
public void close() throws IOException {
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestTermVectorsReader.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestTermVectorsReader.java?rev=718798&r1=718797&r2=718798&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestTermVectorsReader.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestTermVectorsReader.java Tue Nov 18 15:41:49 2008
@@ -17,14 +17,6 @@
* limitations under the License.
*/
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.Token;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.store.MockRAMDirectory;
-import org.apache.lucene.util.LuceneTestCase;
-
import java.io.IOException;
import java.io.Reader;
import java.util.Arrays;
@@ -32,6 +24,16 @@
import java.util.Map;
import java.util.SortedSet;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.store.MockRAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+
public class TestTermVectorsReader extends LuceneTestCase {
//Must be lexicographically sorted, will do in setup, versus trying to maintain here
private String[] testFields = {"f1", "f2", "f3", "f4"};
@@ -118,17 +120,31 @@
private class MyTokenStream extends TokenStream {
int tokenUpto;
- public Token next(final Token reusableToken) {
+
+ TermAttribute termAtt;
+ PositionIncrementAttribute posIncrAtt;
+ OffsetAttribute offsetAtt;
+
+ public MyTokenStream() {
+ termAtt = (TermAttribute) addAttribute(TermAttribute.class);
+ posIncrAtt = (PositionIncrementAttribute) addAttribute(PositionIncrementAttribute.class);
+ offsetAtt = (OffsetAttribute) addAttribute(OffsetAttribute.class);
+ }
+
+ public boolean incrementToken() {
if (tokenUpto >= tokens.length)
- return null;
+ return false;
else {
final TestToken testToken = tokens[tokenUpto++];
- reusableToken.reinit(testToken.text, testToken.startOffset, testToken.endOffset);
- if (tokenUpto > 1)
- reusableToken.setPositionIncrement(testToken.pos - tokens[tokenUpto-2].pos);
- else
- reusableToken.setPositionIncrement(testToken.pos+1);
- return reusableToken;
+ termAtt.setTermBuffer(testToken.text);
+ offsetAtt.setStartOffset(testToken.startOffset);
+ offsetAtt.setEndOffset(testToken.endOffset);
+ if (tokenUpto > 1) {
+ posIncrAtt.setPositionIncrement(testToken.pos - tokens[tokenUpto-2].pos);
+ } else {
+ posIncrAtt.setPositionIncrement(testToken.pos+1);
+ }
+ return true;
}
}
}
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestTermdocPerf.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestTermdocPerf.java?rev=718798&r1=718797&r2=718798&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestTermdocPerf.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestTermdocPerf.java Tue Nov 18 15:41:49 2008
@@ -17,18 +17,18 @@
*/
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.RAMDirectory;
+import java.io.IOException;
+import java.io.Reader;
+import java.util.Random;
+
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.Token;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-
-import java.io.Reader;
-import java.io.IOException;
-import java.util.Random;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
/**
* @version $Id$
@@ -36,15 +36,21 @@
class RepeatingTokenStream extends TokenStream {
public int num;
- Token t;
+ TermAttribute termAtt;
+ String value;
public RepeatingTokenStream(String val) {
- t = new Token(0,val.length());
- t.setTermBuffer(val);
+ this.value = val;
+ this.termAtt = (TermAttribute) addAttribute(TermAttribute.class);
}
- public Token next(final Token reusableToken) throws IOException {
- return --num<0 ? null : (Token) t.clone();
+ public boolean incrementToken() throws IOException {
+ num--;
+ if (num >= 0) {
+ termAtt.setTermBuffer(value);
+ return true;
+ }
+ return false;
}
}
Modified: lucene/java/trunk/src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java?rev=718798&r1=718797&r2=718798&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java Tue Nov 18 15:41:49 2008
@@ -17,17 +17,20 @@
* limitations under the License.
*/
+import java.io.IOException;
import java.io.Reader;
-import org.apache.lucene.util.LuceneTestCase;
-
-import org.apache.lucene.search.Query;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseFilter;
-import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardTokenizer;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.util.LuceneTestCase;
/**
* Test QueryParser's ability to deal with Analyzers that return more
@@ -140,34 +143,49 @@
private final class TestFilter extends TokenFilter {
- private Token prevToken;
+ private String prevType;
+ private int prevStartOffset;
+ private int prevEndOffset;
+
+ TermAttribute termAtt;
+ PositionIncrementAttribute posIncrAtt;
+ OffsetAttribute offsetAtt;
+ TypeAttribute typeAtt;
public TestFilter(TokenStream in) {
super(in);
+ termAtt = (TermAttribute) addAttribute(TermAttribute.class);
+ posIncrAtt = (PositionIncrementAttribute) addAttribute(PositionIncrementAttribute.class);
+ offsetAtt = (OffsetAttribute) addAttribute(OffsetAttribute.class);
+ typeAtt = (TypeAttribute) addAttribute(TypeAttribute.class);
}
- public final Token next(final Token reusableToken) throws java.io.IOException {
+ public final boolean incrementToken() throws java.io.IOException {
if (multiToken > 0) {
- reusableToken.reinit("multi"+(multiToken+1), prevToken.startOffset(), prevToken.endOffset(), prevToken.type());
- reusableToken.setPositionIncrement(0);
+ termAtt.setTermBuffer("multi"+(multiToken+1));
+ offsetAtt.setStartOffset(prevStartOffset);
+ offsetAtt.setEndOffset(prevEndOffset);
+ typeAtt.setType(prevType);
+ posIncrAtt.setPositionIncrement(0);
multiToken--;
- return reusableToken;
+ return true;
} else {
- Token nextToken = input.next(reusableToken);
- if (nextToken == null) {
- prevToken = null;
- return null;
+ boolean next = input.incrementToken();
+ if (next == false) {
+ return false;
}
- prevToken = (Token) nextToken.clone();
- String text = nextToken.term();
+ prevType = typeAtt.type();
+ prevStartOffset = offsetAtt.startOffset();
+ prevEndOffset = offsetAtt.endOffset();
+ String text = termAtt.term();
if (text.equals("triplemulti")) {
multiToken = 2;
- return nextToken;
+ return true;
} else if (text.equals("multi")) {
multiToken = 1;
- return nextToken;
+ return true;
} else {
- return nextToken;
+ return true;
}
}
}
@@ -192,23 +210,28 @@
private final class TestPosIncrementFilter extends TokenFilter {
+ TermAttribute termAtt;
+ PositionIncrementAttribute posIncrAtt;
+
public TestPosIncrementFilter(TokenStream in) {
super(in);
+ termAtt = (TermAttribute) addAttribute(TermAttribute.class);
+ posIncrAtt = (PositionIncrementAttribute) addAttribute(PositionIncrementAttribute.class);
}
- public final Token next(final Token reusableToken) throws java.io.IOException {
- for (Token nextToken = input.next(reusableToken); nextToken != null; nextToken = input.next(reusableToken)) {
- if (nextToken.term().equals("the")) {
+ public final boolean incrementToken () throws java.io.IOException {
+ while(input.incrementToken()) {
+ if (termAtt.term().equals("the")) {
// stopword, do nothing
- } else if (nextToken.term().equals("quick")) {
- nextToken.setPositionIncrement(2);
- return nextToken;
+ } else if (termAtt.term().equals("quick")) {
+ posIncrAtt.setPositionIncrement(2);
+ return true;
} else {
- nextToken.setPositionIncrement(1);
- return nextToken;
+ posIncrAtt.setPositionIncrement(1);
+ return true;
}
}
- return null;
+ return false;
}
}
Modified: lucene/java/trunk/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java?rev=718798&r1=718797&r2=718798&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java Tue Nov 18 15:41:49 2008
@@ -22,7 +22,6 @@
import java.util.Map;
import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
Modified: lucene/java/trunk/src/test/org/apache/lucene/queryParser/TestQueryParser.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/queryParser/TestQueryParser.java?rev=718798&r1=718797&r2=718798&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/queryParser/TestQueryParser.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/queryParser/TestQueryParser.java Tue Nov 18 15:41:49 2008
@@ -19,8 +19,8 @@
import java.io.IOException;
import java.io.Reader;
-import java.text.DateFormat;
import java.text.Collator;
+import java.text.DateFormat;
import java.util.Calendar;
import java.util.Date;
import java.util.Locale;
@@ -31,11 +31,12 @@
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.analysis.StopAnalyzer;
import org.apache.lucene.analysis.StopFilter;
-import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.document.DateField;
import org.apache.lucene.document.DateTools;
import org.apache.lucene.document.Document;
@@ -64,36 +65,47 @@
public static Analyzer qpAnalyzer = new QPTestAnalyzer();
public static class QPTestFilter extends TokenFilter {
+ TermAttribute termAtt;
+ OffsetAttribute offsetAtt;
+
/**
* Filter which discards the token 'stop' and which expands the
* token 'phrase' into 'phrase1 phrase2'
*/
public QPTestFilter(TokenStream in) {
super(in);
+ termAtt = (TermAttribute) addAttribute(TermAttribute.class);
+ offsetAtt = (OffsetAttribute) addAttribute(OffsetAttribute.class);
}
boolean inPhrase = false;
int savedStart = 0, savedEnd = 0;
- public Token next(final Token reusableToken) throws IOException {
- assert reusableToken != null;
+ public boolean incrementToken() throws IOException {
if (inPhrase) {
inPhrase = false;
- return reusableToken.reinit("phrase2", savedStart, savedEnd);
+ termAtt.setTermBuffer("phrase2");
+ offsetAtt.setStartOffset(savedStart);
+ offsetAtt.setEndOffset(savedEnd);
+ return true;
} else
- for (Token nextToken = input.next(reusableToken); nextToken != null; nextToken = input.next(reusableToken)) {
- if (nextToken.term().equals("phrase")) {
+ while (input.incrementToken()) {
+ if (termAtt.term().equals("phrase")) {
inPhrase = true;
- savedStart = nextToken.startOffset();
- savedEnd = nextToken.endOffset();
- return nextToken.reinit("phrase1", savedStart, savedEnd);
- } else if (!nextToken.term().equals("stop"))
- return nextToken;
+ savedStart = offsetAtt.startOffset();
+ savedEnd = offsetAtt.endOffset();
+ termAtt.setTermBuffer("phrase1");
+ offsetAtt.setStartOffset(savedStart);
+ offsetAtt.setEndOffset(savedEnd);
+ return true;
+ } else if (!termAtt.term().equals("stop"))
+ return true;
}
- return null;
+ return false;
}
}
+
public static class QPTestAnalyzer extends Analyzer {
/** Filters LowerCaseTokenizer with StopFilter. */
Modified: lucene/java/trunk/src/test/org/apache/lucene/search/TestPositionIncrement.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/search/TestPositionIncrement.java?rev=718798&r1=718797&r2=718798&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/search/TestPositionIncrement.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/search/TestPositionIncrement.java Tue Nov 18 15:41:49 2008
@@ -17,14 +17,16 @@
* limitations under the License.
*/
+import java.io.IOException;
import java.io.Reader;
-import java.io.StringReader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.StopFilter;
-import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
@@ -49,14 +51,19 @@
private final int[] INCREMENTS = {1, 2, 1, 0, 1};
private int i = 0;
- public Token next(final Token reusableToken) {
- assert reusableToken != null;
+ PositionIncrementAttribute posIncrAtt = (PositionIncrementAttribute) addAttribute(PositionIncrementAttribute.class);
+ TermAttribute termAtt = (TermAttribute) addAttribute(TermAttribute.class);
+ OffsetAttribute offsetAtt = (OffsetAttribute) addAttribute(OffsetAttribute.class);
+
+ public boolean incrementToken() {
if (i == TOKENS.length)
- return null;
- reusableToken.reinit(TOKENS[i], i, i);
- reusableToken.setPositionIncrement(INCREMENTS[i]);
+ return false;
+ termAtt.setTermBuffer(TOKENS[i]);
+ offsetAtt.setStartOffset(i);
+ offsetAtt.setEndOffset(i);
+ posIncrAtt.setPositionIncrement(INCREMENTS[i]);
i++;
- return reusableToken;
+ return true;
}
};
}
@@ -196,18 +203,4 @@
StopFilter.setEnablePositionIncrementsDefault(dflt);
}
}
-
- /**
- * Basic analyzer behavior should be to keep sequential terms in one
- * increment from one another.
- */
- public void testIncrementingPositions() throws Exception {
- Analyzer analyzer = new WhitespaceAnalyzer();
- TokenStream ts = analyzer.tokenStream("field",
- new StringReader("one two three four five"));
- final Token reusableToken = new Token();
- for (Token nextToken = ts.next(reusableToken); nextToken != null; nextToken = ts.next(reusableToken)) {
- assertEquals(nextToken.term(), 1, nextToken.getPositionIncrement());
- }
- }
}
Modified: lucene/java/trunk/src/test/org/apache/lucene/search/TestRangeQuery.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/search/TestRangeQuery.java?rev=718798&r1=718797&r2=718798&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/search/TestRangeQuery.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/search/TestRangeQuery.java Tue Nov 18 15:41:49 2008
@@ -26,7 +26,7 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.Token;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.LuceneTestCase;
import java.io.IOException;
@@ -236,23 +236,25 @@
private static class SingleCharTokenizer extends Tokenizer {
char[] buffer = new char[1];
boolean done;
-
+ TermAttribute termAtt;
+
public SingleCharTokenizer(Reader r) {
super(r);
+ termAtt = (TermAttribute) addAttribute(TermAttribute.class);
}
- public final Token next(final Token reusableToken) throws IOException {
+ public boolean incrementToken() throws IOException {
int count = input.read(buffer);
if (done)
- return null;
+ return false;
else {
done = true;
if (count == 1) {
- reusableToken.termBuffer()[0] = buffer[0];
- reusableToken.setTermLength(1);
+ termAtt.termBuffer()[0] = buffer[0];
+ termAtt.setTermLength(1);
} else
- reusableToken.setTermLength(0);
- return reusableToken;
+ termAtt.setTermLength(0);
+ return true;
}
}
Modified: lucene/java/trunk/src/test/org/apache/lucene/search/payloads/PayloadHelper.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/search/payloads/PayloadHelper.java?rev=718798&r1=718797&r2=718798&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/search/payloads/PayloadHelper.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/search/payloads/PayloadHelper.java Tue Nov 18 15:41:49 2008
@@ -2,6 +2,7 @@
import org.apache.lucene.analysis.*;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.index.Payload;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.RAMDirectory;
@@ -41,34 +42,36 @@
public class PayloadFilter extends TokenFilter {
String fieldName;
int numSeen = 0;
-
+ PayloadAttribute payloadAtt;
+
public PayloadFilter(TokenStream input, String fieldName) {
super(input);
this.fieldName = fieldName;
+ payloadAtt = (PayloadAttribute) addAttribute(PayloadAttribute.class);
}
- public Token next() throws IOException {
- Token result = input.next();
- if (result != null) {
+ public boolean incrementToken() throws IOException {
+
+ if (input.incrementToken()) {
if (fieldName.equals(FIELD))
{
- result.setPayload(new Payload(payloadField));
+ payloadAtt.setPayload(new Payload(payloadField));
}
else if (fieldName.equals(MULTI_FIELD))
{
if (numSeen % 2 == 0)
{
- result.setPayload(new Payload(payloadMultiField1));
+ payloadAtt.setPayload(new Payload(payloadMultiField1));
}
else
{
- result.setPayload(new Payload(payloadMultiField2));
+ payloadAtt.setPayload(new Payload(payloadMultiField2));
}
numSeen++;
}
-
+ return true;
}
- return result;
+ return false;
}
}
Modified: lucene/java/trunk/src/test/org/apache/lucene/search/payloads/TestBoostingTermQuery.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/search/payloads/TestBoostingTermQuery.java?rev=718798&r1=718797&r2=718798&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/search/payloads/TestBoostingTermQuery.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/search/payloads/TestBoostingTermQuery.java Tue Nov 18 15:41:49 2008
@@ -21,9 +21,9 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseTokenizer;
-import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
@@ -66,29 +66,32 @@
private class PayloadFilter extends TokenFilter {
String fieldName;
int numSeen = 0;
-
+
+ PayloadAttribute payloadAtt;
+
public PayloadFilter(TokenStream input, String fieldName) {
super(input);
this.fieldName = fieldName;
+ payloadAtt = (PayloadAttribute) addAttribute(PayloadAttribute.class);
}
-
- public Token next(final Token reusableToken) throws IOException {
- assert reusableToken != null;
- Token nextToken = input.next(reusableToken);
- if (nextToken != null) {
+
+ public boolean incrementToken() throws IOException {
+ boolean hasNext = input.incrementToken();
+ if (hasNext) {
if (fieldName.equals("field")) {
- nextToken.setPayload(new Payload(payloadField));
+ payloadAtt.setPayload(new Payload(payloadField));
} else if (fieldName.equals("multiField")) {
if (numSeen % 2 == 0) {
- nextToken.setPayload(new Payload(payloadMultiField1));
+ payloadAtt.setPayload(new Payload(payloadMultiField1));
} else {
- nextToken.setPayload(new Payload(payloadMultiField2));
+ payloadAtt.setPayload(new Payload(payloadMultiField2));
}
numSeen++;
}
-
+ return true;
+ } else {
+ return false;
}
- return nextToken;
}
}