Posted to commits@lucene.apache.org by mi...@apache.org on 2012/04/22 23:03:07 UTC

svn commit: r1328975 - in /lucene/dev/trunk: dev-tools/scripts/ lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/ lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/ lucene/analysis/common/src/java/org/apache/lucene/...

Author: mikemccand
Date: Sun Apr 22 21:03:06 2012
New Revision: 1328975

URL: http://svn.apache.org/viewvc?rev=1328975&view=rev
Log:
fix some more broken links...

Modified:
    lucene/dev/trunk/dev-tools/scripts/checkJavadocLinks.py
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/CompoundWordTokenFilterBase.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/DictionaryCompoundWordTokenFilter.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/HyphenationCompoundWordTokenFilter.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/package.html
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharArrayMap.java
    lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharArraySet.java
    lucene/dev/trunk/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizer.java

Modified: lucene/dev/trunk/dev-tools/scripts/checkJavadocLinks.py
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/dev-tools/scripts/checkJavadocLinks.py?rev=1328975&r1=1328974&r2=1328975&view=diff
==============================================================================
--- lucene/dev/trunk/dev-tools/scripts/checkJavadocLinks.py (original)
+++ lucene/dev/trunk/dev-tools/scripts/checkJavadocLinks.py Sun Apr 22 21:03:06 2012
@@ -154,7 +154,26 @@ def checkAll(dirName):
       # TODO: normalize path sep for windows...
       if link.startswith('http://') or link.startswith('https://'):
         # don't check external links
-        pass
+
+        if link.find('lucene.apache.org/java/docs/mailinglists.html') != -1:
+          # OK
+          pass
+        elif link.find('lucene.apache.org/java/docs/discussion.html') != -1:
+          # OK
+          pass
+        elif link.find('svn.apache.org') != -1 or link.find('lucene.apache.org') != -1:
+          if not printed:
+            printed = True
+            print
+            print fullPath
+          print '  BAD EXTERNAL LINK: %s' % link
+      elif link.startswith('mailto://'):
+        if link.find('@lucene.apache.org') == -1 and link.find('@apache.org') != -1:
+          if not printed:
+            printed = True
+            print
+            print fullPath
+          print '  BROKEN MAILTO (?): %s' % link
       elif link not in allFiles:
         # We only load HTML... so if the link is another resource (eg
         # SweetSpotSimilarity refs

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/CompoundWordTokenFilterBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/CompoundWordTokenFilterBase.java?rev=1328975&r1=1328974&r2=1328975&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/CompoundWordTokenFilterBase.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/CompoundWordTokenFilterBase.java Sun Apr 22 21:03:06 2012
@@ -33,6 +33,8 @@ import org.apache.lucene.util.Version;
 /**
  * Base class for decomposition token filters.
  * <p>
+ *
+ * <a name="version"></a>
  * You must specify the required {@link Version} compatibility when creating
  * CompoundWordTokenFilterBase:
  * <ul>

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/DictionaryCompoundWordTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/DictionaryCompoundWordTokenFilter.java?rev=1328975&r1=1328974&r2=1328975&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/DictionaryCompoundWordTokenFilter.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/DictionaryCompoundWordTokenFilter.java Sun Apr 22 21:03:06 2012
@@ -48,7 +48,7 @@ public class DictionaryCompoundWordToken
    * @param matchVersion
    *          Lucene version to enable correct Unicode 4.0 behavior in the
    *          dictionaries if Version > 3.0. See <a
-   *          href="CompoundWordTokenFilterBase#version"
+   *          href="CompoundWordTokenFilterBase.html#version"
    *          >CompoundWordTokenFilterBase</a> for details.
    * @param input
    *          the {@link TokenStream} to process
@@ -65,7 +65,7 @@ public class DictionaryCompoundWordToken
    * @param matchVersion
    *          Lucene version to enable correct Unicode 4.0 behavior in the
    *          dictionaries if Version > 3.0. See <a
-   *          href="CompoundWordTokenFilterBase#version"
+   *          href="CompoundWordTokenFilterBase.html#version"
    *          >CompoundWordTokenFilterBase</a> for details.
    * @param input
    *          the {@link TokenStream} to process

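The hunks above (and the matching ones in the next file) pair an explicit anchor in the base class javadoc with page-qualified links from the subclasses. A minimal, purely illustrative sketch of that convention, using hypothetical class names that are not part of this commit:

    /**
     * <a name="version"></a>
     * You must specify the required {@link org.apache.lucene.util.Version}
     * compatibility when creating the filter.
     */
    class AnchorTarget {}

    /**
     * See <a href="AnchorTarget.html#version">AnchorTarget</a> for details.
     * The link has to name the generated page explicitly; the earlier form
     * href="AnchorTarget#version" (no ".html") resolves to a file that does
     * not exist in the javadoc output, which is the kind of local-link
     * breakage checkJavadocLinks.py flags.
     */
    class AnchorSource {}
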
Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/HyphenationCompoundWordTokenFilter.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/HyphenationCompoundWordTokenFilter.java?rev=1328975&r1=1328974&r2=1328975&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/HyphenationCompoundWordTokenFilter.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/HyphenationCompoundWordTokenFilter.java Sun Apr 22 21:03:06 2012
@@ -52,7 +52,7 @@ public class HyphenationCompoundWordToke
    * @param matchVersion
    *          Lucene version to enable correct Unicode 4.0 behavior in the
    *          dictionaries if Version > 3.0. See <a
-   *          href="CompoundWordTokenFilterBase#version"
+   *          href="CompoundWordTokenFilterBase.html#version"
    *          >CompoundWordTokenFilterBase</a> for details.
    * @param input
    *          the {@link TokenStream} to process
@@ -73,7 +73,7 @@ public class HyphenationCompoundWordToke
    * @param matchVersion
    *          Lucene version to enable correct Unicode 4.0 behavior in the
    *          dictionaries if Version > 3.0. See <a
-   *          href="CompoundWordTokenFilterBase#version"
+   *          href="CompoundWordTokenFilterBase.html#version"
    *          >CompoundWordTokenFilterBase</a> for details.
    * @param input
    *          the {@link TokenStream} to process

Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/package.html
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/package.html?rev=1328975&r1=1328974&r2=1328975&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/package.html (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/package.html Sun Apr 22 21:03:06 2012
@@ -26,7 +26,7 @@ Fast, general-purpose grammar-based toke
 <p>The <code>org.apache.lucene.analysis.standard</code> package contains three
     fast grammar-based tokenizers constructed with JFlex:</p>
 <ul>
-    <li><code><a href="StandardTokenizer.html">StandardTokenizer</a></code>:
+    <li>{@link org.apache.lucene.analysis.standard.StandardTokenizer}:
         as of Lucene 3.1, implements the Word Break rules from the Unicode Text 
         Segmentation algorithm, as specified in 
         <a href="http://unicode.org/reports/tr29/">Unicode Standard Annex #29</a>.
@@ -34,35 +34,35 @@ Fast, general-purpose grammar-based toke
         <b>not</b> tokenized as single tokens, but are instead split up into 
         tokens according to the UAX#29 word break rules.
         <br/>
-        <code><a href="StandardAnalyzer">StandardAnalyzer</a></code> includes
-        <code>StandardTokenizer</code>, 
-        <code><a href="StandardFilter">StandardFilter</a></code>, 
-        <code><a href="../../../../../../all/org/apache/lucene/analysis/LowerCaseFilter.html">LowerCaseFilter</a></code>
-        and <code><a href="../../../../../../all/org/apache/lucene/analysis/StopFilter.html">StopFilter</a></code>.
+        {@link org.apache.lucene.analysis.standard.StandardAnalyzer StandardAnalyzer} includes
+        {@link org.apache.lucene.analysis.standard.StandardTokenizer StandardTokenizer},
+        {@link org.apache.lucene.analysis.standard.StandardFilter StandardFilter}, 
+        {@link org.apache.lucene.analysis.core.LowerCaseFilter LowerCaseFilter}
+        and {@link org.apache.lucene.analysis.core.StopFilter StopFilter}.
         When the <code>Version</code> specified in the constructor is lower than 
-        3.1, the <code><a href="ClassicTokenizer.html">ClassicTokenizer</a></code>
+        3.1, the {@link org.apache.lucene.analysis.standard.ClassicTokenizer ClassicTokenizer}
         implementation is invoked.</li>
-    <li><code><a href="ClassicTokenizer.html">ClassicTokenizer</a></code>:
+    <li>{@link org.apache.lucene.analysis.standard.ClassicTokenizer ClassicTokenizer}:
         this class was formerly (prior to Lucene 3.1) named 
         <code>StandardTokenizer</code>.  (Its tokenization rules are not
         based on the Unicode Text Segmentation algorithm.)
-        <code><a href="ClassicAnalyzer">ClassicAnalyzer</a></code> includes
-        <code>ClassicTokenizer</code>,
-        <code><a href="StandardFilter">StandardFilter</a></code>, 
-        <code><a href="../../../../../../all/org/apache/lucene/analysis/LowerCaseFilter.html">LowerCaseFilter</a></code>
-        and <code><a href="../../../../../../all/org/apache/lucene/analysis/StopFilter.html">StopFilter</a></code>.
+        {@link org.apache.lucene.analysis.standard.ClassicAnalyzer ClassicAnalyzer} includes
+        {@link org.apache.lucene.analysis.standard.ClassicTokenizer ClassicTokenizer},
+        {@link org.apache.lucene.analysis.standard.StandardFilter StandardFilter}, 
+        {@link org.apache.lucene.analysis.core.LowerCaseFilter LowerCaseFilter}
+        and {@link org.apache.lucene.analysis.core.StopFilter StopFilter}.
     </li>
-    <li><code><a href="UAX29URLEmailTokenizer.html">UAX29URLEmailTokenizer</a></code>: 
+    <li>{@link org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer UAX29URLEmailTokenizer}:
         implements the Word Break rules from the Unicode Text Segmentation
         algorithm, as specified in 
         <a href="http://unicode.org/reports/tr29/">Unicode Standard Annex #29</a>.
         URLs and email addresses are also tokenized according to the relevant RFCs.
         <br/>
-        <code><a href="UAX29URLEmailAnalyzer">UAX29URLEmailAnalyzer</a></code> includes
-        <code>UAX29URLEmailTokenizer</code>,
-        <code><a href="StandardFilter">StandardFilter</a></code>,
-        <code><a href="../../../../../../all/org/apache/lucene/analysis/LowerCaseFilter.html">LowerCaseFilter</a></code>
-        and <code><a href="../../../../../../all/org/apache/lucene/analysis/StopFilter.html">StopFilter</a></code>.
+        {@link org.apache.lucene.analysis.standard.UAX29URLEmailAnalyzer UAX29URLEmailAnalyzer} includes
+        {@link org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer UAX29URLEmailTokenizer},
+        {@link org.apache.lucene.analysis.standard.StandardFilter StandardFilter},
+        {@link org.apache.lucene.analysis.core.LowerCaseFilter LowerCaseFilter}
+        and {@link org.apache.lucene.analysis.core.StopFilter StopFilter}.
     </li>
 </ul>
 </body>

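As the rewritten package overview notes, the Version passed to StandardAnalyzer decides between the UAX#29-based StandardTokenizer and the pre-3.1 ClassicTokenizer rules. A small, self-contained sketch of that selection follows; the demo class is hypothetical and assumes the trunk (4.0) analyzer API at the time of this commit:

    import java.io.IOException;
    import java.io.StringReader;

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.util.Version;

    public class StandardAnalyzerVersionDemo {
      public static void main(String[] args) throws IOException {
        // LUCENE_40 (or anything >= LUCENE_31) selects the UAX#29-based
        // StandardTokenizer; a lower constant would fall back to the
        // ClassicTokenizer word-break rules instead.
        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_40);
        TokenStream ts = analyzer.tokenStream("body",
            new StringReader("The quick brown fox jumped"));
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
          System.out.println(term.toString());
        }
        ts.end();
        ts.close();
      }
    }
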
Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharArrayMap.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharArrayMap.java?rev=1328975&r1=1328974&r2=1328975&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharArrayMap.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharArrayMap.java Sun Apr 22 21:03:06 2012
@@ -36,6 +36,8 @@ import org.apache.lucene.util.Version;
  * etc.  It is designed to be quick to retrieve items
  * by char[] keys without the necessity of converting
  * to a String first.
+ *
+ * <a name="version"></a>
  * <p>You must specify the required {@link Version}
  * compatibility when creating {@link CharArrayMap}:
  * <ul>
@@ -120,12 +122,12 @@ public class CharArrayMap<V> extends Abs
   }
 
   /** true if the <code>len</code> chars of <code>text</code> starting at <code>off</code>
-   * are in the {@link #keySet} */
+   * are in the {@link #keySet()} */
   public boolean containsKey(char[] text, int off, int len) {
     return keys[getSlot(text, off, len)] != null;
   }
 
-  /** true if the <code>CharSequence</code> is in the {@link #keySet} */
+  /** true if the <code>CharSequence</code> is in the {@link #keySet()} */
   public boolean containsKey(CharSequence cs) {
     return keys[getSlot(cs)] != null;
   }

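The corrected {@link #keySet()} references describe lookups that are easiest to see in use. A minimal usage sketch, illustrative only and assuming the constructor and containsKey signatures present in trunk at this revision:

    import org.apache.lucene.analysis.util.CharArrayMap;
    import org.apache.lucene.util.Version;

    public class CharArrayMapDemo {
      public static void main(String[] args) {
        // matchVersion selects the Unicode casing behavior; the last argument
        // makes lookups case-insensitive.
        CharArrayMap<Integer> map = new CharArrayMap<Integer>(Version.LUCENE_40, 16, true);
        map.put("quick", 1);

        // containsKey(char[], off, len) tests a slice of the buffer against
        // the keySet() without allocating a String.
        char[] buffer = "the quick fox".toCharArray();
        System.out.println(map.containsKey(buffer, 4, 5)); // true
        System.out.println(map.containsKey("QUICK"));      // true (ignoreCase)
      }
    }
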
Modified: lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharArraySet.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharArraySet.java?rev=1328975&r1=1328974&r2=1328975&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharArraySet.java (original)
+++ lucene/dev/trunk/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharArraySet.java Sun Apr 22 21:03:06 2012
@@ -33,6 +33,8 @@ import org.apache.lucene.util.Version;
  * etc.  It is designed to be quick to test if a char[]
  * is in the set without the necessity of converting it
  * to a String first.
+ *
+ * <a name="version"></a>
  * <p>You must specify the required {@link Version}
  * compatibility when creating {@link CharArraySet}:
  * <ul>

Modified: lucene/dev/trunk/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizer.java?rev=1328975&r1=1328974&r2=1328975&view=diff
==============================================================================
--- lucene/dev/trunk/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizer.java (original)
+++ lucene/dev/trunk/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizer.java Sun Apr 22 21:03:06 2012
@@ -100,7 +100,7 @@ public final class JapaneseTokenizer ext
    */
   public static final Mode DEFAULT_MODE = Mode.SEARCH;
 
-  enum Type {
+  public enum Type {
     KNOWN,
     UNKNOWN,
     USER
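
Making the Type enum public lets code outside org.apache.lucene.analysis.ja refer to a token's dictionary origin. A small illustrative sketch; the describe helper is hypothetical and not part of the commit:

    import org.apache.lucene.analysis.ja.JapaneseTokenizer;

    public class TokenTypeDemo {
      // Classify a token by where the tokenizer found it; the meanings follow
      // the enum constants shown above (system dictionary, unknown word,
      // user dictionary).
      static String describe(JapaneseTokenizer.Type type) {
        switch (type) {
          case KNOWN:   return "system dictionary entry";
          case UNKNOWN: return "unknown word";
          case USER:    return "user dictionary entry";
          default:      return "unexpected type";
        }
      }

      public static void main(String[] args) {
        System.out.println(describe(JapaneseTokenizer.Type.KNOWN));
      }
    }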