You are viewing a plain text version of this content. The canonical link for it is here.
Posted to java-commits@lucene.apache.org by bu...@apache.org on 2008/05/20 09:40:56 UTC
svn commit: r658136 [3/4] - in /lucene/java/trunk/src:
java/org/apache/lucene/ java/org/apache/lucene/analysis/
java/org/apache/lucene/document/ java/org/apache/lucene/index/
java/org/apache/lucene/search/ java/org/apache/lucene/util/
test/org/apache/l...
Propchange: lucene/java/trunk/src/java/org/apache/lucene/search/TopDocCollector.java
------------------------------------------------------------------------------
svn:eol-style = native
Propchange: lucene/java/trunk/src/java/org/apache/lucene/search/TopFieldDocCollector.java
------------------------------------------------------------------------------
svn:eol-style = native
Modified: lucene/java/trunk/src/java/org/apache/lucene/util/SmallFloat.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/util/SmallFloat.java?rev=658136&r1=658135&r2=658136&view=diff
==============================================================================
--- lucene/java/trunk/src/java/org/apache/lucene/util/SmallFloat.java (original)
+++ lucene/java/trunk/src/java/org/apache/lucene/util/SmallFloat.java Tue May 20 00:40:54 2008
@@ -1,125 +1,125 @@
-package org.apache.lucene.util;
-/**
- * Copyright 2005 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-/** Floating point numbers smaller than 32 bits.
- *
- * @author yonik
- * @version $Id$
- */
-public class SmallFloat {
-
- /** Converts a 32 bit float to an 8 bit float.
- * <br>Values less than zero are all mapped to zero.
- * <br>Values are truncated (rounded down) to the nearest 8 bit value.
- * <br>Values between zero and the smallest representable value
- * are rounded up.
- *
- * @param f the 32 bit float to be converted to an 8 bit float (byte)
- * @param numMantissaBits the number of mantissa bits to use in the byte, with the remainder to be used in the exponent
- * @param zeroExp the zero-point in the range of exponent values
- * @return the 8 bit float representation
- */
- public static byte floatToByte(float f, int numMantissaBits, int zeroExp) {
- // Adjustment from a float zero exponent to our zero exponent,
- // shifted over to our exponent position.
- int fzero = (63-zeroExp)<<numMantissaBits;
- int bits = Float.floatToRawIntBits(f);
- int smallfloat = bits >> (24-numMantissaBits);
- if (smallfloat < fzero) {
- return (bits<=0) ?
- (byte)0 // negative numbers and zero both map to 0 byte
- :(byte)1; // underflow is mapped to smallest non-zero number.
- } else if (smallfloat >= fzero + 0x100) {
- return -1; // overflow maps to largest number
- } else {
- return (byte)(smallfloat - fzero);
- }
- }
-
- /** Converts an 8 bit float to a 32 bit float. */
- public static float byteToFloat(byte b, int numMantissaBits, int zeroExp) {
- // on Java1.5 & 1.6 JVMs, prebuilding a decoding array and doing a lookup
- // is only a little bit faster (anywhere from 0% to 7%)
- if (b == 0) return 0.0f;
- int bits = (b&0xff) << (24-numMantissaBits);
- bits += (63-zeroExp) << 24;
- return Float.intBitsToFloat(bits);
- }
-
-
- //
- // Some specializations of the generic functions follow.
- // The generic functions are just as fast with current (1.5)
- // -server JVMs, but still slower with client JVMs.
- //
-
- /** floatToByte(b, mantissaBits=3, zeroExponent=15)
- * <br>smallest non-zero value = 5.820766E-10
- * <br>largest value = 7.5161928E9
- * <br>epsilon = 0.125
- */
- public static byte floatToByte315(float f) {
- int bits = Float.floatToRawIntBits(f);
- int smallfloat = bits >> (24-3);
- if (smallfloat < (63-15)<<3) {
- return (bits<=0) ? (byte)0 : (byte)1;
- }
- if (smallfloat >= ((63-15)<<3) + 0x100) {
- return -1;
- }
- return (byte)(smallfloat - ((63-15)<<3));
- }
-
- /** byteToFloat(b, mantissaBits=3, zeroExponent=15) */
- public static float byte315ToFloat(byte b) {
- // on Java1.5 & 1.6 JVMs, prebuilding a decoding array and doing a lookup
- // is only a little bit faster (anywhere from 0% to 7%)
- if (b == 0) return 0.0f;
- int bits = (b&0xff) << (24-3);
- bits += (63-15) << 24;
- return Float.intBitsToFloat(bits);
- }
-
-
- /** floatToByte(b, mantissaBits=5, zeroExponent=2)
- * <br>smallest nonzero value = 0.033203125
- * <br>largest value = 1984.0
- * <br>epsilon = 0.03125
- */
- public static byte floatToByte52(float f) {
- int bits = Float.floatToRawIntBits(f);
- int smallfloat = bits >> (24-5);
- if (smallfloat < (63-2)<<5) {
- return (bits<=0) ? (byte)0 : (byte)1;
- }
- if (smallfloat >= ((63-2)<<5) + 0x100) {
- return -1;
- }
- return (byte)(smallfloat - ((63-2)<<5));
- }
-
- /** byteToFloat(b, mantissaBits=5, zeroExponent=2) */
- public static float byte52ToFloat(byte b) {
- // on Java1.5 & 1.6 JVMs, prebuilding a decoding array and doing a lookup
- // is only a little bit faster (anywhere from 0% to 7%)
- if (b == 0) return 0.0f;
- int bits = (b&0xff) << (24-5);
- bits += (63-2) << 24;
- return Float.intBitsToFloat(bits);
- }
-}
+package org.apache.lucene.util;
+/**
+ * Copyright 2005 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/** Floating point numbers smaller than 32 bits.
+ *
+ * @author yonik
+ * @version $Id$
+ */
+public class SmallFloat {
+
+ /** Converts a 32 bit float to an 8 bit float.
+ * <br>Values less than zero are all mapped to zero.
+ * <br>Values are truncated (rounded down) to the nearest 8 bit value.
+ * <br>Values between zero and the smallest representable value
+ * are rounded up.
+ *
+ * @param f the 32 bit float to be converted to an 8 bit float (byte)
+ * @param numMantissaBits the number of mantissa bits to use in the byte, with the remainder to be used in the exponent
+ * @param zeroExp the zero-point in the range of exponent values
+ * @return the 8 bit float representation
+ */
+ public static byte floatToByte(float f, int numMantissaBits, int zeroExp) {
+ // Adjustment from a float zero exponent to our zero exponent,
+ // shifted over to our exponent position.
+ int fzero = (63-zeroExp)<<numMantissaBits;
+ int bits = Float.floatToRawIntBits(f);
+ int smallfloat = bits >> (24-numMantissaBits);
+ if (smallfloat < fzero) {
+ return (bits<=0) ?
+ (byte)0 // negative numbers and zero both map to 0 byte
+ :(byte)1; // underflow is mapped to smallest non-zero number.
+ } else if (smallfloat >= fzero + 0x100) {
+ return -1; // overflow maps to largest number
+ } else {
+ return (byte)(smallfloat - fzero);
+ }
+ }
+
+ /** Converts an 8 bit float to a 32 bit float. */
+ public static float byteToFloat(byte b, int numMantissaBits, int zeroExp) {
+ // on Java1.5 & 1.6 JVMs, prebuilding a decoding array and doing a lookup
+ // is only a little bit faster (anywhere from 0% to 7%)
+ if (b == 0) return 0.0f;
+ int bits = (b&0xff) << (24-numMantissaBits);
+ bits += (63-zeroExp) << 24;
+ return Float.intBitsToFloat(bits);
+ }
+
+
+ //
+ // Some specializations of the generic functions follow.
+ // The generic functions are just as fast with current (1.5)
+ // -server JVMs, but still slower with client JVMs.
+ //
+
+ /** floatToByte(b, mantissaBits=3, zeroExponent=15)
+ * <br>smallest non-zero value = 5.820766E-10
+ * <br>largest value = 7.5161928E9
+ * <br>epsilon = 0.125
+ */
+ public static byte floatToByte315(float f) {
+ int bits = Float.floatToRawIntBits(f);
+ int smallfloat = bits >> (24-3);
+ if (smallfloat < (63-15)<<3) {
+ return (bits<=0) ? (byte)0 : (byte)1;
+ }
+ if (smallfloat >= ((63-15)<<3) + 0x100) {
+ return -1;
+ }
+ return (byte)(smallfloat - ((63-15)<<3));
+ }
+
+ /** byteToFloat(b, mantissaBits=3, zeroExponent=15) */
+ public static float byte315ToFloat(byte b) {
+ // on Java1.5 & 1.6 JVMs, prebuilding a decoding array and doing a lookup
+ // is only a little bit faster (anywhere from 0% to 7%)
+ if (b == 0) return 0.0f;
+ int bits = (b&0xff) << (24-3);
+ bits += (63-15) << 24;
+ return Float.intBitsToFloat(bits);
+ }
+
+
+ /** floatToByte(b, mantissaBits=5, zeroExponent=2)
+ * <br>smallest nonzero value = 0.033203125
+ * <br>largest value = 1984.0
+ * <br>epsilon = 0.03125
+ */
+ public static byte floatToByte52(float f) {
+ int bits = Float.floatToRawIntBits(f);
+ int smallfloat = bits >> (24-5);
+ if (smallfloat < (63-2)<<5) {
+ return (bits<=0) ? (byte)0 : (byte)1;
+ }
+ if (smallfloat >= ((63-2)<<5) + 0x100) {
+ return -1;
+ }
+ return (byte)(smallfloat - ((63-2)<<5));
+ }
+
+ /** byteToFloat(b, mantissaBits=5, zeroExponent=2) */
+ public static float byte52ToFloat(byte b) {
+ // on Java1.5 & 1.6 JVMs, prebuilding a decoding array and doing a lookup
+ // is only a little bit faster (anywhere from 0% to 7%)
+ if (b == 0) return 0.0f;
+ int bits = (b&0xff) << (24-5);
+ bits += (63-2) << 24;
+ return Float.intBitsToFloat(bits);
+ }
+}
Propchange: lucene/java/trunk/src/java/org/apache/lucene/util/SmallFloat.java
------------------------------------------------------------------------------
svn:eol-style = native
Propchange: lucene/java/trunk/src/java/org/apache/lucene/util/ToStringUtils.java
------------------------------------------------------------------------------
svn:eol-style = native
Propchange: lucene/java/trunk/src/test/org/apache/lucene/TestHitIterator.java
------------------------------------------------------------------------------
svn:eol-style = native
Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java?rev=658136&r1=658135&r2=658136&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java Tue May 20 00:40:54 2008
@@ -1,103 +1,103 @@
-package org.apache.lucene.analysis;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-import java.io.IOException;
-
-import org.apache.lucene.util.LuceneTestCase;
-
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.TermVector;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.TermPositions;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.RAMDirectory;
-
-public class TestCachingTokenFilter extends LuceneTestCase {
- private String[] tokens = new String[] {"term1", "term2", "term3", "term2"};
-
- public void testCaching() throws IOException {
- Directory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
- Document doc = new Document();
- TokenStream stream = new TokenStream() {
- private int index = 0;
-
- public Token next() throws IOException {
- if (index == tokens.length) {
- return null;
- } else {
- return new Token(tokens[index++], 0, 0);
- }
- }
-
- };
-
- stream = new CachingTokenFilter(stream);
-
- doc.add(new Field("preanalyzed", stream, TermVector.NO));
-
- // 1) we consume all tokens twice before we add the doc to the index
- checkTokens(stream);
- stream.reset();
- checkTokens(stream);
-
- // 2) now add the document to the index and verify if all tokens are indexed
- // don't reset the stream here, the DocumentWriter should do that implicitly
- writer.addDocument(doc);
- writer.close();
-
- IndexReader reader = IndexReader.open(dir);
- TermPositions termPositions = reader.termPositions(new Term("preanalyzed", "term1"));
- assertTrue(termPositions.next());
- assertEquals(1, termPositions.freq());
- assertEquals(0, termPositions.nextPosition());
-
- termPositions.seek(new Term("preanalyzed", "term2"));
- assertTrue(termPositions.next());
- assertEquals(2, termPositions.freq());
- assertEquals(1, termPositions.nextPosition());
- assertEquals(3, termPositions.nextPosition());
-
- termPositions.seek(new Term("preanalyzed", "term3"));
- assertTrue(termPositions.next());
- assertEquals(1, termPositions.freq());
- assertEquals(2, termPositions.nextPosition());
- reader.close();
-
- // 3) reset stream and consume tokens again
- stream.reset();
- checkTokens(stream);
- }
-
- private void checkTokens(TokenStream stream) throws IOException {
- int count = 0;
- Token token;
- while ((token = stream.next()) != null) {
- assertTrue(count < tokens.length);
- assertEquals(tokens[count], token.termText());
- count++;
- }
-
- assertEquals(tokens.length, count);
- }
-}
+package org.apache.lucene.analysis;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+import java.io.IOException;
+
+import org.apache.lucene.util.LuceneTestCase;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermPositions;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+
+public class TestCachingTokenFilter extends LuceneTestCase {
+ private String[] tokens = new String[] {"term1", "term2", "term3", "term2"};
+
+ public void testCaching() throws IOException {
+ Directory dir = new RAMDirectory();
+ IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
+ Document doc = new Document();
+ TokenStream stream = new TokenStream() {
+ private int index = 0;
+
+ public Token next() throws IOException {
+ if (index == tokens.length) {
+ return null;
+ } else {
+ return new Token(tokens[index++], 0, 0);
+ }
+ }
+
+ };
+
+ stream = new CachingTokenFilter(stream);
+
+ doc.add(new Field("preanalyzed", stream, TermVector.NO));
+
+ // 1) we consume all tokens twice before we add the doc to the index
+ checkTokens(stream);
+ stream.reset();
+ checkTokens(stream);
+
+ // 2) now add the document to the index and verify if all tokens are indexed
+ // don't reset the stream here, the DocumentWriter should do that implicitly
+ writer.addDocument(doc);
+ writer.close();
+
+ IndexReader reader = IndexReader.open(dir);
+ TermPositions termPositions = reader.termPositions(new Term("preanalyzed", "term1"));
+ assertTrue(termPositions.next());
+ assertEquals(1, termPositions.freq());
+ assertEquals(0, termPositions.nextPosition());
+
+ termPositions.seek(new Term("preanalyzed", "term2"));
+ assertTrue(termPositions.next());
+ assertEquals(2, termPositions.freq());
+ assertEquals(1, termPositions.nextPosition());
+ assertEquals(3, termPositions.nextPosition());
+
+ termPositions.seek(new Term("preanalyzed", "term3"));
+ assertTrue(termPositions.next());
+ assertEquals(1, termPositions.freq());
+ assertEquals(2, termPositions.nextPosition());
+ reader.close();
+
+ // 3) reset stream and consume tokens again
+ stream.reset();
+ checkTokens(stream);
+ }
+
+ private void checkTokens(TokenStream stream) throws IOException {
+ int count = 0;
+ Token token;
+ while ((token = stream.next()) != null) {
+ assertTrue(count < tokens.length);
+ assertEquals(tokens[count], token.termText());
+ count++;
+ }
+
+ assertEquals(tokens.length, count);
+ }
+}
Propchange: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
------------------------------------------------------------------------------
svn:eol-style = native
Propchange: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java
------------------------------------------------------------------------------
svn:eol-style = native
Propchange: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java
------------------------------------------------------------------------------
svn:eol-style = native
Propchange: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestLengthFilter.java
------------------------------------------------------------------------------
svn:eol-style = native
Modified: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopFilter.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopFilter.java?rev=658136&r1=658135&r2=658136&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopFilter.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopFilter.java Tue May 20 00:40:54 2008
@@ -1,128 +1,128 @@
-package org.apache.lucene.analysis;
-
-/**
- * Copyright 2005 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.util.English;
-import org.apache.lucene.util.LuceneTestCase;
-
-import java.io.IOException;
-import java.io.StringReader;
-import java.util.ArrayList;
-import java.util.Set;
-
-/**
- * @author yonik
- */
-public class TestStopFilter extends LuceneTestCase {
-
- private final static boolean VERBOSE = false;
-
- // other StopFilter functionality is already tested by TestStopAnalyzer
-
- public void testExactCase() throws IOException {
- StringReader reader = new StringReader("Now is The Time");
- String[] stopWords = new String[] { "is", "the", "Time" };
- TokenStream stream = new StopFilter(new WhitespaceTokenizer(reader), stopWords);
- assertEquals("Now", stream.next().termText());
- assertEquals("The", stream.next().termText());
- assertEquals(null, stream.next());
- }
-
- public void testIgnoreCase() throws IOException {
- StringReader reader = new StringReader("Now is The Time");
- String[] stopWords = new String[] { "is", "the", "Time" };
- TokenStream stream = new StopFilter(new WhitespaceTokenizer(reader), stopWords, true);
- assertEquals("Now", stream.next().termText());
- assertEquals(null,stream.next());
- }
-
- public void testStopFilt() throws IOException {
- StringReader reader = new StringReader("Now is The Time");
- String[] stopWords = new String[] { "is", "the", "Time" };
- Set stopSet = StopFilter.makeStopSet(stopWords);
- TokenStream stream = new StopFilter(new WhitespaceTokenizer(reader), stopSet);
- assertEquals("Now", stream.next().termText());
- assertEquals("The", stream.next().termText());
- assertEquals(null, stream.next());
- }
-
- /**
- * Test Position increments applied by StopFilter with and without enabling this option.
- */
- public void testStopPositons() throws IOException {
- StringBuffer sb = new StringBuffer();
- ArrayList a = new ArrayList();
- for (int i=0; i<20; i++) {
- String w = English.intToEnglish(i).trim();
- sb.append(w).append(" ");
- if (i%3 != 0) a.add(w);
- }
- log(sb.toString());
- String stopWords[] = (String[]) a.toArray(new String[0]);
- for (int i=0; i<a.size(); i++) log("Stop: "+stopWords[i]);
- Set stopSet = StopFilter.makeStopSet(stopWords);
- // with increments
- StringReader reader = new StringReader(sb.toString());
- StopFilter stpf = new StopFilter(new WhitespaceTokenizer(reader), stopSet);
- doTestStopPositons(stpf,true);
- // without increments
- reader = new StringReader(sb.toString());
- stpf = new StopFilter(new WhitespaceTokenizer(reader), stopSet);
- doTestStopPositons(stpf,false);
- // with increments, concatenating two stop filters
- ArrayList a0 = new ArrayList();
- ArrayList a1 = new ArrayList();
- for (int i=0; i<a.size(); i++) {
- if (i%2==0) {
- a0.add(a.get(i));
- } else {
- a1.add(a.get(i));
- }
- }
- String stopWords0[] = (String[]) a0.toArray(new String[0]);
- for (int i=0; i<a0.size(); i++) log("Stop0: "+stopWords0[i]);
- String stopWords1[] = (String[]) a1.toArray(new String[0]);
- for (int i=0; i<a1.size(); i++) log("Stop1: "+stopWords1[i]);
- Set stopSet0 = StopFilter.makeStopSet(stopWords0);
- Set stopSet1 = StopFilter.makeStopSet(stopWords1);
- reader = new StringReader(sb.toString());
- StopFilter stpf0 = new StopFilter(new WhitespaceTokenizer(reader), stopSet0); // first part of the set
- stpf0.setEnablePositionIncrements(true);
- StopFilter stpf01 = new StopFilter(stpf0, stopSet1); // two stop filters concatenated!
- doTestStopPositons(stpf01,true);
- }
-
- private void doTestStopPositons(StopFilter stpf, boolean enableIcrements) throws IOException {
- log("---> test with enable-increments-"+(enableIcrements?"enabled":"disabled"));
- stpf.setEnablePositionIncrements(enableIcrements);
- for (int i=0; i<20; i+=3) {
- Token t = stpf.next();
- log("Token "+i+": "+t);
- String w = English.intToEnglish(i).trim();
- assertEquals("expecting token "+i+" to be "+w,w,t.termText());
- assertEquals("all but first token must have position increment of 3",enableIcrements?(i==0?1:3):1,t.getPositionIncrement());
- }
- assertNull(stpf.next());
- }
-
- // print debug info depending on VERBOSE
- private static void log(String s) {
- if (VERBOSE) {
- System.out.println(s);
- }
- }
-}
+package org.apache.lucene.analysis;
+
+/**
+ * Copyright 2005 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.English;
+import org.apache.lucene.util.LuceneTestCase;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.util.ArrayList;
+import java.util.Set;
+
+/**
+ * @author yonik
+ */
+public class TestStopFilter extends LuceneTestCase {
+
+ private final static boolean VERBOSE = false;
+
+ // other StopFilter functionality is already tested by TestStopAnalyzer
+
+ public void testExactCase() throws IOException {
+ StringReader reader = new StringReader("Now is The Time");
+ String[] stopWords = new String[] { "is", "the", "Time" };
+ TokenStream stream = new StopFilter(new WhitespaceTokenizer(reader), stopWords);
+ assertEquals("Now", stream.next().termText());
+ assertEquals("The", stream.next().termText());
+ assertEquals(null, stream.next());
+ }
+
+ public void testIgnoreCase() throws IOException {
+ StringReader reader = new StringReader("Now is The Time");
+ String[] stopWords = new String[] { "is", "the", "Time" };
+ TokenStream stream = new StopFilter(new WhitespaceTokenizer(reader), stopWords, true);
+ assertEquals("Now", stream.next().termText());
+ assertEquals(null,stream.next());
+ }
+
+ public void testStopFilt() throws IOException {
+ StringReader reader = new StringReader("Now is The Time");
+ String[] stopWords = new String[] { "is", "the", "Time" };
+ Set stopSet = StopFilter.makeStopSet(stopWords);
+ TokenStream stream = new StopFilter(new WhitespaceTokenizer(reader), stopSet);
+ assertEquals("Now", stream.next().termText());
+ assertEquals("The", stream.next().termText());
+ assertEquals(null, stream.next());
+ }
+
+ /**
+ * Test Position increments applied by StopFilter with and without enabling this option.
+ */
+ public void testStopPositons() throws IOException {
+ StringBuffer sb = new StringBuffer();
+ ArrayList a = new ArrayList();
+ for (int i=0; i<20; i++) {
+ String w = English.intToEnglish(i).trim();
+ sb.append(w).append(" ");
+ if (i%3 != 0) a.add(w);
+ }
+ log(sb.toString());
+ String stopWords[] = (String[]) a.toArray(new String[0]);
+ for (int i=0; i<a.size(); i++) log("Stop: "+stopWords[i]);
+ Set stopSet = StopFilter.makeStopSet(stopWords);
+ // with increments
+ StringReader reader = new StringReader(sb.toString());
+ StopFilter stpf = new StopFilter(new WhitespaceTokenizer(reader), stopSet);
+ doTestStopPositons(stpf,true);
+ // without increments
+ reader = new StringReader(sb.toString());
+ stpf = new StopFilter(new WhitespaceTokenizer(reader), stopSet);
+ doTestStopPositons(stpf,false);
+ // with increments, concatenating two stop filters
+ ArrayList a0 = new ArrayList();
+ ArrayList a1 = new ArrayList();
+ for (int i=0; i<a.size(); i++) {
+ if (i%2==0) {
+ a0.add(a.get(i));
+ } else {
+ a1.add(a.get(i));
+ }
+ }
+ String stopWords0[] = (String[]) a0.toArray(new String[0]);
+ for (int i=0; i<a0.size(); i++) log("Stop0: "+stopWords0[i]);
+ String stopWords1[] = (String[]) a1.toArray(new String[0]);
+ for (int i=0; i<a1.size(); i++) log("Stop1: "+stopWords1[i]);
+ Set stopSet0 = StopFilter.makeStopSet(stopWords0);
+ Set stopSet1 = StopFilter.makeStopSet(stopWords1);
+ reader = new StringReader(sb.toString());
+ StopFilter stpf0 = new StopFilter(new WhitespaceTokenizer(reader), stopSet0); // first part of the set
+ stpf0.setEnablePositionIncrements(true);
+ StopFilter stpf01 = new StopFilter(stpf0, stopSet1); // two stop filters concatenated!
+ doTestStopPositons(stpf01,true);
+ }
+
+ private void doTestStopPositons(StopFilter stpf, boolean enableIcrements) throws IOException {
+ log("---> test with enable-increments-"+(enableIcrements?"enabled":"disabled"));
+ stpf.setEnablePositionIncrements(enableIcrements);
+ for (int i=0; i<20; i+=3) {
+ Token t = stpf.next();
+ log("Token "+i+": "+t);
+ String w = English.intToEnglish(i).trim();
+ assertEquals("expecting token "+i+" to be "+w,w,t.termText());
+ assertEquals("all but first token must have position increment of 3",enableIcrements?(i==0?1:3):1,t.getPositionIncrement());
+ }
+ assertNull(stpf.next());
+ }
+
+ // print debug info depending on VERBOSE
+ private static void log(String s) {
+ if (VERBOSE) {
+ System.out.println(s);
+ }
+ }
+}
Propchange: lucene/java/trunk/src/test/org/apache/lucene/analysis/TestStopFilter.java
------------------------------------------------------------------------------
svn:eol-style = native
Propchange: lucene/java/trunk/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
------------------------------------------------------------------------------
svn:eol-style = native
Propchange: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
------------------------------------------------------------------------------
svn:eol-style = native
Propchange: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexModifier.java
------------------------------------------------------------------------------
svn:eol-style = native
Propchange: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
------------------------------------------------------------------------------
svn:eol-style = native
Propchange: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterLockRelease.java
------------------------------------------------------------------------------
svn:eol-style = native
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterMerging.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterMerging.java?rev=658136&r1=658135&r2=658136&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterMerging.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterMerging.java Tue May 20 00:40:54 2008
@@ -1,108 +1,108 @@
-package org.apache.lucene.index;
-/**
- * Copyright 2006 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.MockRAMDirectory;
-import org.apache.lucene.analysis.standard.StandardAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.util.LuceneTestCase;
-
-import java.io.IOException;
-
-
-public class TestIndexWriterMerging extends LuceneTestCase
-{
-
- /**
- * Tests that index merging (specifically addIndexes()) doesn't
- * change the index order of documents.
- */
- public void testLucene() throws IOException
- {
-
- int num=100;
-
- Directory indexA = new MockRAMDirectory();
- Directory indexB = new MockRAMDirectory();
-
- fillIndex(indexA, 0, num);
- boolean fail = verifyIndex(indexA, 0);
- if (fail)
- {
- fail("Index a is invalid");
- }
-
- fillIndex(indexB, num, num);
- fail = verifyIndex(indexB, num);
- if (fail)
- {
- fail("Index b is invalid");
- }
-
- Directory merged = new MockRAMDirectory();
-
- IndexWriter writer = new IndexWriter(merged, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
- writer.setMergeFactor(2);
-
- writer.addIndexes(new Directory[]{indexA, indexB});
- writer.close();
-
- fail = verifyIndex(merged, 0);
- merged.close();
-
- assertFalse("The merged index is invalid", fail);
- }
-
- private boolean verifyIndex(Directory directory, int startAt) throws IOException
- {
- boolean fail = false;
- IndexReader reader = IndexReader.open(directory);
-
- int max = reader.maxDoc();
- for (int i = 0; i < max; i++)
- {
- Document temp = reader.document(i);
- //System.out.println("doc "+i+"="+temp.getField("count").stringValue());
- //compare the index doc number to the value that it should be
- if (!temp.getField("count").stringValue().equals((i + startAt) + ""))
- {
- fail = true;
- System.out.println("Document " + (i + startAt) + " is returning document " + temp.getField("count").stringValue());
- }
- }
- reader.close();
- return fail;
- }
-
- private void fillIndex(Directory dir, int start, int numDocs) throws IOException
- {
-
- IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
- writer.setMergeFactor(2);
- writer.setMaxBufferedDocs(2);
-
- for (int i = start; i < (start + numDocs); i++)
- {
- Document temp = new Document();
- temp.add(new Field("count", (""+i), Field.Store.YES, Field.Index.UN_TOKENIZED));
-
- writer.addDocument(temp);
- }
- writer.close();
- }
-}
+package org.apache.lucene.index;
+/**
+ * Copyright 2006 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockRAMDirectory;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.util.LuceneTestCase;
+
+import java.io.IOException;
+
+
+public class TestIndexWriterMerging extends LuceneTestCase
+{
+
+ /**
+ * Tests that index merging (specifically addIndexes()) doesn't
+ * change the index order of documents.
+ */
+ public void testLucene() throws IOException
+ {
+
+ int num=100;
+
+ Directory indexA = new MockRAMDirectory();
+ Directory indexB = new MockRAMDirectory();
+
+ fillIndex(indexA, 0, num);
+ boolean fail = verifyIndex(indexA, 0);
+ if (fail)
+ {
+ fail("Index a is invalid");
+ }
+
+ fillIndex(indexB, num, num);
+ fail = verifyIndex(indexB, num);
+ if (fail)
+ {
+ fail("Index b is invalid");
+ }
+
+ Directory merged = new MockRAMDirectory();
+
+ IndexWriter writer = new IndexWriter(merged, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+ writer.setMergeFactor(2);
+
+ writer.addIndexes(new Directory[]{indexA, indexB});
+ writer.close();
+
+ fail = verifyIndex(merged, 0);
+ merged.close();
+
+ assertFalse("The merged index is invalid", fail);
+ }
+
+ private boolean verifyIndex(Directory directory, int startAt) throws IOException
+ {
+ boolean fail = false;
+ IndexReader reader = IndexReader.open(directory);
+
+ int max = reader.maxDoc();
+ for (int i = 0; i < max; i++)
+ {
+ Document temp = reader.document(i);
+ //System.out.println("doc "+i+"="+temp.getField("count").stringValue());
+ //compare the index doc number to the value that it should be
+ if (!temp.getField("count").stringValue().equals((i + startAt) + ""))
+ {
+ fail = true;
+ System.out.println("Document " + (i + startAt) + " is returning document " + temp.getField("count").stringValue());
+ }
+ }
+ reader.close();
+ return fail;
+ }
+
+ private void fillIndex(Directory dir, int start, int numDocs) throws IOException
+ {
+
+ IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+ writer.setMergeFactor(2);
+ writer.setMaxBufferedDocs(2);
+
+ for (int i = start; i < (start + numDocs); i++)
+ {
+ Document temp = new Document();
+ temp.add(new Field("count", (""+i), Field.Store.YES, Field.Index.UN_TOKENIZED));
+
+ writer.addDocument(temp);
+ }
+ writer.close();
+ }
+}
Propchange: lucene/java/trunk/src/test/org/apache/lucene/index/TestIndexWriterMerging.java
------------------------------------------------------------------------------
svn:eol-style = native
Modified: lucene/java/trunk/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
URL: http://svn.apache.org/viewvc/lucene/java/trunk/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java?rev=658136&r1=658135&r2=658136&view=diff
==============================================================================
--- lucene/java/trunk/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java (original)
+++ lucene/java/trunk/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java Tue May 20 00:40:54 2008
@@ -1,158 +1,158 @@
-package org.apache.lucene.index;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.io.Reader;
-
-import org.apache.lucene.util.LuceneTestCase;
-
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.LowerCaseTokenizer;
-import org.apache.lucene.analysis.Token;
-import org.apache.lucene.analysis.TokenFilter;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.store.RAMDirectory;
-
-/**
- * This testcase tests whether multi-level skipping is being used
- * to reduce I/O while skipping through posting lists.
- *
- * Skipping in general is already covered by several other
- * testcases.
- *
- */
-public class TestMultiLevelSkipList extends LuceneTestCase {
- public void testSimpleSkip() throws IOException {
- RAMDirectory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new PayloadAnalyzer(), true,
- IndexWriter.MaxFieldLength.LIMITED);
- Term term = new Term("test", "a");
- for (int i = 0; i < 5000; i++) {
- Document d1 = new Document();
- d1.add(new Field(term.field(), term.text(), Store.NO, Index.TOKENIZED));
- writer.addDocument(d1);
- }
- writer.flush();
- writer.optimize();
- writer.close();
-
- IndexReader reader = IndexReader.open(dir);
- SegmentTermPositions tp = (SegmentTermPositions) reader.termPositions();
- tp.freqStream = new CountingStream(tp.freqStream);
-
- for (int i = 0; i < 2; i++) {
- counter = 0;
- tp.seek(term);
-
- checkSkipTo(tp, 14, 185); // no skips
- checkSkipTo(tp, 17, 190); // one skip on level 0
- checkSkipTo(tp, 287, 200); // one skip on level 1, two on level 0
-
- // this test would fail if we had only one skip level,
- * because then more bytes would be read from the freqStream
- checkSkipTo(tp, 4800, 250);// one skip on level 2
- }
- }
-
- public void checkSkipTo(TermPositions tp, int target, int maxCounter) throws IOException {
- tp.skipTo(target);
- if (maxCounter < counter) {
- fail("Too many bytes read: " + counter);
- }
-
- assertEquals("Wrong document " + tp.doc() + " after skipTo target " + target, target, tp.doc());
- assertEquals("Frequency is not 1: " + tp.freq(), 1,tp.freq());
- tp.nextPosition();
- byte[] b = new byte[1];
- tp.getPayload(b, 0);
- assertEquals("Wrong payload for the target " + target + ": " + b[0], (byte) target, b[0]);
- }
-
- private static class PayloadAnalyzer extends Analyzer {
- public TokenStream tokenStream(String fieldName, Reader reader) {
- return new PayloadFilter(new LowerCaseTokenizer(reader));
- }
-
- }
-
- private static class PayloadFilter extends TokenFilter {
- static int count = 0;
-
- protected PayloadFilter(TokenStream input) {
- super(input);
- }
-
- public Token next() throws IOException {
- Token t = input.next();
- if (t != null) {
- t.setPayload(new Payload(new byte[] { (byte) count++ }));
- }
- return t;
- }
-
- }
-
- private int counter = 0;
-
- // Simply extends IndexInput in a way that we are able to count the number
- // of bytes read
- class CountingStream extends IndexInput {
- private IndexInput input;
-
- CountingStream(IndexInput input) {
- this.input = input;
- }
-
- public byte readByte() throws IOException {
- TestMultiLevelSkipList.this.counter++;
- return this.input.readByte();
- }
-
- public void readBytes(byte[] b, int offset, int len) throws IOException {
- TestMultiLevelSkipList.this.counter += len;
- this.input.readBytes(b, offset, len);
- }
-
- public void close() throws IOException {
- this.input.close();
- }
-
- public long getFilePointer() {
- return this.input.getFilePointer();
- }
-
- public void seek(long pos) throws IOException {
- this.input.seek(pos);
- }
-
- public long length() {
- return this.input.length();
- }
-
- public Object clone() {
- return new CountingStream((IndexInput) this.input.clone());
- }
-
- }
-}
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.Reader;
+
+import org.apache.lucene.util.LuceneTestCase;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.LowerCaseTokenizer;
+import org.apache.lucene.analysis.Token;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.RAMDirectory;
+
+/**
+ * This testcase tests whether multi-level skipping is being used
+ * to reduce I/O while skipping through posting lists.
+ *
+ * Skipping in general is already covered by several other
+ * testcases.
+ *
+ */
+public class TestMultiLevelSkipList extends LuceneTestCase {
+ public void testSimpleSkip() throws IOException {
+ RAMDirectory dir = new RAMDirectory();
+ IndexWriter writer = new IndexWriter(dir, new PayloadAnalyzer(), true,
+ IndexWriter.MaxFieldLength.LIMITED);
+ Term term = new Term("test", "a");
+ for (int i = 0; i < 5000; i++) {
+ Document d1 = new Document();
+ d1.add(new Field(term.field(), term.text(), Store.NO, Index.TOKENIZED));
+ writer.addDocument(d1);
+ }
+ writer.flush();
+ writer.optimize();
+ writer.close();
+
+ IndexReader reader = IndexReader.open(dir);
+ SegmentTermPositions tp = (SegmentTermPositions) reader.termPositions();
+ tp.freqStream = new CountingStream(tp.freqStream);
+
+ for (int i = 0; i < 2; i++) {
+ counter = 0;
+ tp.seek(term);
+
+ checkSkipTo(tp, 14, 185); // no skips
+ checkSkipTo(tp, 17, 190); // one skip on level 0
+ checkSkipTo(tp, 287, 200); // one skip on level 1, two on level 0
+
+ // this test would fail if we had only one skip level,
+ // because than more bytes would be read from the freqStream
+ checkSkipTo(tp, 4800, 250);// one skip on level 2
+ }
+ }
+
+ public void checkSkipTo(TermPositions tp, int target, int maxCounter) throws IOException {
+ tp.skipTo(target);
+ if (maxCounter < counter) {
+ fail("Too many bytes read: " + counter);
+ }
+
+ assertEquals("Wrong document " + tp.doc() + " after skipTo target " + target, target, tp.doc());
+ assertEquals("Frequency is not 1: " + tp.freq(), 1,tp.freq());
+ tp.nextPosition();
+ byte[] b = new byte[1];
+ tp.getPayload(b, 0);
+ assertEquals("Wrong payload for the target " + target + ": " + b[0], (byte) target, b[0]);
+ }
+
+ private static class PayloadAnalyzer extends Analyzer {
+ public TokenStream tokenStream(String fieldName, Reader reader) {
+ return new PayloadFilter(new LowerCaseTokenizer(reader));
+ }
+
+ }
+
+ private static class PayloadFilter extends TokenFilter {
+ static int count = 0;
+
+ protected PayloadFilter(TokenStream input) {
+ super(input);
+ }
+
+ public Token next() throws IOException {
+ Token t = input.next();
+ if (t != null) {
+ t.setPayload(new Payload(new byte[] { (byte) count++ }));
+ }
+ return t;
+ }
+
+ }
+
+ private int counter = 0;
+
+ // Simply extends IndexInput in a way that we are able to count the number
+ // of bytes read
+ class CountingStream extends IndexInput {
+ private IndexInput input;
+
+ CountingStream(IndexInput input) {
+ this.input = input;
+ }
+
+ public byte readByte() throws IOException {
+ TestMultiLevelSkipList.this.counter++;
+ return this.input.readByte();
+ }
+
+ public void readBytes(byte[] b, int offset, int len) throws IOException {
+ TestMultiLevelSkipList.this.counter += len;
+ this.input.readBytes(b, offset, len);
+ }
+
+ public void close() throws IOException {
+ this.input.close();
+ }
+
+ public long getFilePointer() {
+ return this.input.getFilePointer();
+ }
+
+ public void seek(long pos) throws IOException {
+ this.input.seek(pos);
+ }
+
+ public long length() {
+ return this.input.length();
+ }
+
+ public Object clone() {
+ return new CountingStream((IndexInput) this.input.clone());
+ }
+
+ }
+}
Propchange: lucene/java/trunk/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
------------------------------------------------------------------------------
svn:eol-style = native
Propchange: lucene/java/trunk/src/test/org/apache/lucene/index/TestParallelReader.java
------------------------------------------------------------------------------
svn:eol-style = native