You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by ko...@apache.org on 2011/02/05 01:12:31 UTC

svn commit: r1067352 - in /lucene/dev/branches/branch_3x: ./ lucene/ lucene/contrib/ lucene/contrib/analyzers/common/src/java/org/apache/lucene/analysis/path/ lucene/contrib/analyzers/common/src/test/org/apache/lucene/analysis/path/ solr/ solr/example/...

Author: koji
Date: Sat Feb  5 00:12:30 2011
New Revision: 1067352

URL: http://svn.apache.org/viewvc?rev=1067352&view=rev
Log:
SOLR-1057: add PathHierarchyTokenizer

Added:
    lucene/dev/branches/branch_3x/lucene/contrib/analyzers/common/src/java/org/apache/lucene/analysis/path/
    lucene/dev/branches/branch_3x/lucene/contrib/analyzers/common/src/java/org/apache/lucene/analysis/path/PathHierarchyTokenizer.java
    lucene/dev/branches/branch_3x/lucene/contrib/analyzers/common/src/test/org/apache/lucene/analysis/path/
    lucene/dev/branches/branch_3x/lucene/contrib/analyzers/common/src/test/org/apache/lucene/analysis/path/TestPathHierarchyTokenizer.java
    lucene/dev/branches/branch_3x/solr/src/java/org/apache/solr/analysis/PathHierarchyTokenizerFactory.java
      - copied unchanged from r1067131, lucene/dev/trunk/solr/src/java/org/apache/solr/analysis/PathHierarchyTokenizerFactory.java
Modified:
    lucene/dev/branches/branch_3x/   (props changed)
    lucene/dev/branches/branch_3x/lucene/   (props changed)
    lucene/dev/branches/branch_3x/lucene/contrib/CHANGES.txt
    lucene/dev/branches/branch_3x/solr/   (props changed)
    lucene/dev/branches/branch_3x/solr/CHANGES.txt
    lucene/dev/branches/branch_3x/solr/example/solr/conf/schema.xml

Modified: lucene/dev/branches/branch_3x/lucene/contrib/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/contrib/CHANGES.txt?rev=1067352&r1=1067351&r2=1067352&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/contrib/CHANGES.txt (original)
+++ lucene/dev/branches/branch_3x/lucene/contrib/CHANGES.txt Sat Feb  5 00:12:30 2011
@@ -255,6 +255,9 @@ New features
  * LUCENE-2842: Add analyzer for Galician. Also adds the RSLP (Orengo) stemmer
    for Portuguese.  (Robert Muir)
 
+ * SOLR-1057: Add PathHierarchyTokenizer that represents file path hierarchies as synonyms of
+   /something, /something/something, /something/something/else. (Ryan McKinley, Koji Sekiguchi)
+
 Build
 
  * LUCENE-2124: Moved the JDK-based collation support from contrib/collation 

Added: lucene/dev/branches/branch_3x/lucene/contrib/analyzers/common/src/java/org/apache/lucene/analysis/path/PathHierarchyTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/contrib/analyzers/common/src/java/org/apache/lucene/analysis/path/PathHierarchyTokenizer.java?rev=1067352&view=auto
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/contrib/analyzers/common/src/java/org/apache/lucene/analysis/path/PathHierarchyTokenizer.java (added)
+++ lucene/dev/branches/branch_3x/lucene/contrib/analyzers/common/src/java/org/apache/lucene/analysis/path/PathHierarchyTokenizer.java Sat Feb  5 00:12:30 2011
@@ -0,0 +1,150 @@
+package org.apache.lucene.analysis.path;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.Reader;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+
+/**
+ * 
+ * Take something like:
+ * 
+ * <pre>
+ *  /something/something/else
+ * </pre>
+ * 
+ * and make:
+ *  
+ * <pre>
+ *  /something
+ *  /something/something
+ *  /something/something/else
+ * </pre>
+ * 
+ */
+public class PathHierarchyTokenizer extends Tokenizer {
+
+  /** Creates a tokenizer with the default buffer size (1024) and delimiter {@code '/'}. */
+  public PathHierarchyTokenizer(Reader input) {
+    this(input, DEFAULT_BUFFER_SIZE, DEFAULT_DELIMITER);
+  }
+
+  /** Creates a tokenizer whose delimiter is emitted unchanged (replacement == delimiter). */
+  public PathHierarchyTokenizer(Reader input, int bufferSize, char delimiter) {
+    this(input, bufferSize, delimiter, delimiter);
+  }
+
+  /** Creates a tokenizer with the default buffer size and an explicit replacement char. */
+  public PathHierarchyTokenizer(Reader input, char delimiter, char replacement) {
+    this(input, DEFAULT_BUFFER_SIZE, delimiter, replacement);
+  }
+
+  /**
+   * Full constructor.
+   *
+   * @param input       reader over the path text to tokenize
+   * @param bufferSize  initial capacity for the term buffer and the carried-over token text
+   * @param delimiter   character that separates path elements in the input
+   * @param replacement character written into emitted tokens in place of each delimiter
+   */
+  public PathHierarchyTokenizer(Reader input, int bufferSize, char delimiter, char replacement) {
+    super(input);
+    // termAtt is initialized by its field initializer before this body runs.
+    termAtt.resizeBuffer(bufferSize);
+    this.delimiter = delimiter;
+    this.replacement = replacement;
+    endDelimiter = false;
+    resultToken = new StringBuilder(bufferSize);
+  }
+  
+  private static final int DEFAULT_BUFFER_SIZE = 1024;
+  public static final char DEFAULT_DELIMITER = '/';
+  // Character that splits the input into path elements.
+  private final char delimiter;
+  // Character emitted in each token wherever 'delimiter' occurred in the input.
+  private final char replacement;
+  
+  private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+  private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
+  private final PositionIncrementAttribute posAtt = addAttribute(PositionIncrementAttribute.class);
+  // End offset of the most recently emitted token; reused by end(). All tokens start at offset 0.
+  private int finalOffset = 0;
+  // True when the previous call consumed a delimiter whose replacement char must be
+  // prepended to the NEXT token (so "/a/b" yields "/a" then "/a/b", delimiter included).
+  private boolean endDelimiter;
+  // Text of the previously emitted token; each new token extends it by one path element.
+  private StringBuilder resultToken;
+
+  @Override
+  public final boolean incrementToken() throws IOException {
+    clearAttributes();
+    // Start from the previous token's text: every emitted token is a prefix of the next.
+    termAtt.append( resultToken );
+    if(resultToken.length() == 0){
+      // First token of the stream advances the position...
+      posAtt.setPositionIncrement(1);
+    }
+    else{
+      // ...subsequent (longer) prefixes are stacked at the same position, synonym-style.
+      posAtt.setPositionIncrement(0);
+    }
+    // 'length' counts characters appended during THIS call (resultToken added later).
+    int length = 0;
+    boolean added = false;
+    if( endDelimiter ){
+      // A delimiter was consumed at the end of the previous call; emit its replacement now.
+      termAtt.append(replacement);
+      length++;
+      endDelimiter = false;
+      added = true;
+    }
+
+    while (true) {
+      int c = input.read();
+      if( c < 0 ) {
+        // End of input: finish whatever was accumulated; return false if nothing new was read.
+        length += resultToken.length();
+        termAtt.setLength(length);
+        // Tokens always start at 0, so 'length' (total chars consumed) is the end offset.
+        finalOffset = correctOffset(length);
+        offsetAtt.setOffset(correctOffset(0), finalOffset);
+        if( added ){
+          resultToken.setLength(0);
+          resultToken.append(termAtt.buffer(), 0, length);
+        }
+        return added;
+      }
+      added = true;
+      if( c == delimiter ) {
+        if( length > 0 ){
+          // Delimiter after content ends this token; defer its replacement to the next one.
+          endDelimiter = true;
+          break;
+        }
+        else{
+          // Leading delimiter(s) belong to this token (e.g. the "/" starting "/a").
+          termAtt.append(replacement);
+          length++;
+        }
+      }
+      else {
+        termAtt.append((char)c);
+        length++;
+      }
+    }
+
+    // Emit the accumulated prefix and remember it for the next call.
+    length += resultToken.length();
+    termAtt.setLength(length);
+    finalOffset = correctOffset(length);
+    offsetAtt.setOffset(correctOffset(0), finalOffset);
+    resultToken.setLength(0);
+    resultToken.append(termAtt.buffer(), 0, length);
+    return true;
+  }
+  
+  @Override
+  public final void end() {
+    // set final offset
+    offsetAtt.setOffset(finalOffset, finalOffset);
+  }
+
+  @Override
+  public void reset(Reader input) throws IOException {
+    // Clear all carried-over state so the tokenizer can be reused on a new Reader.
+    super.reset(input);
+    resultToken.setLength(0);
+    finalOffset = 0;
+    endDelimiter = false;
+  }
+}

Added: lucene/dev/branches/branch_3x/lucene/contrib/analyzers/common/src/test/org/apache/lucene/analysis/path/TestPathHierarchyTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/lucene/contrib/analyzers/common/src/test/org/apache/lucene/analysis/path/TestPathHierarchyTokenizer.java?rev=1067352&view=auto
==============================================================================
--- lucene/dev/branches/branch_3x/lucene/contrib/analyzers/common/src/test/org/apache/lucene/analysis/path/TestPathHierarchyTokenizer.java (added)
+++ lucene/dev/branches/branch_3x/lucene/contrib/analyzers/common/src/test/org/apache/lucene/analysis/path/TestPathHierarchyTokenizer.java Sat Feb  5 00:12:30 2011
@@ -0,0 +1,121 @@
+package org.apache.lucene.analysis.path;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.StringReader;
+
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.CharStream;
+import org.apache.lucene.analysis.MappingCharFilter;
+import org.apache.lucene.analysis.NormalizeCharMap;
+
+public class TestPathHierarchyTokenizer extends BaseTokenStreamTestCase {
+
+  // In each test, assertTokenStreamContents is passed: expected terms, start offsets,
+  // end offsets, and position increments, in that order. Every token starts at offset 0
+  // because each token is a prefix of the full path; only the first token has a
+  // position increment of 1 — the longer prefixes stack at the same position.
+
+  // A rooted path yields one token per hierarchy level, delimiter included.
+  public void testBasic() throws Exception {
+    String path = "/a/b/c";
+    PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path) );
+    assertTokenStreamContents(t,
+        new String[]{"/a", "/a/b", "/a/b/c"},
+        new int[]{0, 0, 0},
+        new int[]{2, 4, 6},
+        new int[]{1, 0, 0});
+  }
+
+  // A trailing delimiter produces one extra token that ends with the delimiter.
+  public void testEndOfDelimiter() throws Exception {
+    String path = "/a/b/c/";
+    PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path) );
+    assertTokenStreamContents(t,
+        new String[]{"/a", "/a/b", "/a/b/c", "/a/b/c/"},
+        new int[]{0, 0, 0, 0},
+        new int[]{2, 4, 6, 7},
+        new int[]{1, 0, 0, 0});
+  }
+
+  // A relative path (no leading delimiter) starts its first token at the first element.
+  public void testStartOfChar() throws Exception {
+    String path = "a/b/c";
+    PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path) );
+    assertTokenStreamContents(t,
+        new String[]{"a", "a/b", "a/b/c"},
+        new int[]{0, 0, 0},
+        new int[]{1, 3, 5},
+        new int[]{1, 0, 0});
+  }
+
+  // Relative path plus trailing delimiter combines the two behaviors above.
+  public void testStartOfCharEndOfDelimiter() throws Exception {
+    String path = "a/b/c/";
+    PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path) );
+    assertTokenStreamContents(t,
+        new String[]{"a", "a/b", "a/b/c", "a/b/c/"},
+        new int[]{0, 0, 0, 0},
+        new int[]{1, 3, 5, 6},
+        new int[]{1, 0, 0, 0});
+  }
+
+  // A lone delimiter is emitted as a single one-character token.
+  public void testOnlyDelimiter() throws Exception {
+    String path = "/";
+    PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path) );
+    assertTokenStreamContents(t,
+        new String[]{"/"},
+        new int[]{0},
+        new int[]{1},
+        new int[]{1});
+  }
+
+  // Consecutive delimiters: the second delimiter ends the first token and starts the next.
+  public void testOnlyDelimiters() throws Exception {
+    String path = "//";
+    PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path) );
+    assertTokenStreamContents(t,
+        new String[]{"/", "//"},
+        new int[]{0, 0},
+        new int[]{1, 2},
+        new int[]{1, 0});
+  }
+
+  // The replacement char is substituted for every delimiter in the emitted tokens.
+  public void testReplace() throws Exception {
+    String path = "/a/b/c";
+    PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path), '/', '\\' );
+    assertTokenStreamContents(t,
+        new String[]{"\\a", "\\a\\b", "\\a\\b\\c"},
+        new int[]{0, 0, 0},
+        new int[]{2, 4, 6},
+        new int[]{1, 0, 0});
+  }
+
+  // Backslash-delimited (Windows-style) paths work by passing '\\' as the delimiter.
+  public void testWindowsPath() throws Exception {
+    String path = "c:\\a\\b\\c";
+    PathHierarchyTokenizer t = new PathHierarchyTokenizer( new StringReader(path), '\\', '\\' );
+    assertTokenStreamContents(t,
+        new String[]{"c:", "c:\\a", "c:\\a\\b", "c:\\a\\b\\c"},
+        new int[]{0, 0, 0, 0},
+        new int[]{2, 4, 6, 8},
+        new int[]{1, 0, 0, 0});
+  }
+
+  // A MappingCharFilter can normalize '\\' to '/' ahead of the tokenizer, so Windows
+  // paths tokenize with the default delimiter.
+  public void testNormalizeWinDelimToLinuxDelim() throws Exception {
+    NormalizeCharMap normMap = new NormalizeCharMap();
+    normMap.add("\\", "/");
+    String path = "c:\\a\\b\\c";
+    CharStream cs = new MappingCharFilter(normMap, new StringReader(path));
+    PathHierarchyTokenizer t = new PathHierarchyTokenizer( cs );
+    assertTokenStreamContents(t,
+        new String[]{"c:", "c:/a", "c:/a/b", "c:/a/b/c"},
+        new int[]{0, 0, 0, 0},
+        new int[]{2, 4, 6, 8},
+        new int[]{1, 0, 0, 0});
+  }
+}

Modified: lucene/dev/branches/branch_3x/solr/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/solr/CHANGES.txt?rev=1067352&r1=1067351&r2=1067352&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/solr/CHANGES.txt (original)
+++ lucene/dev/branches/branch_3x/solr/CHANGES.txt Sat Feb  5 00:12:30 2011
@@ -262,6 +262,8 @@ New Features
 
 * SOLR-860: Add debug output for MoreLikeThis. (koji)
 
+* SOLR-1057: Add PathHierarchyTokenizerFactory. (ryan, koji)
+
 Optimizations
 ----------------------
 

Modified: lucene/dev/branches/branch_3x/solr/example/solr/conf/schema.xml
URL: http://svn.apache.org/viewvc/lucene/dev/branches/branch_3x/solr/example/solr/conf/schema.xml?rev=1067352&r1=1067351&r2=1067352&view=diff
==============================================================================
--- lucene/dev/branches/branch_3x/solr/example/solr/conf/schema.xml (original)
+++ lucene/dev/branches/branch_3x/solr/example/solr/conf/schema.xml Sat Feb  5 00:12:30 2011
@@ -395,6 +395,11 @@
       </analyzer>
     </fieldType>
 
+    <fieldType name="text_path" class="solr.TextField" positionIncrementGap="100">
+      <analyzer>
+        <tokenizer class="solr.PathHierarchyTokenizerFactory"/>
+      </analyzer>
+    </fieldType>
 
     <!-- since fields of this type are by default not stored or indexed,
          any data added to them will be ignored outright.  -->