You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@tika.apache.org by ju...@apache.org on 2009/05/22 20:21:59 UTC

svn commit: r777643 [1/3] - in /lucene/tika/trunk: ./ tika-app/src/main/resources/META-INF/ tika-parsers/ tika-parsers/src/main/java/org/apache/tika/parser/txt/ tika-parsers/src/main/resources/META-INF/

Author: jukka
Date: Fri May 22 18:21:59 2009
New Revision: 777643

URL: http://svn.apache.org/viewvc?rev=777643&view=rev
Log:
TIKA-233: Inline the ICU4J charset detection logic

Copied the CharsetDetector and related classes from the latest ICU4J version to org.apache.tika.parser.txt. Note that ASF policy explicitly says that we should *not* add the Apache license header to these ICU4J files.

Updated licensing metadata accordingly.

Added:
    lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetDetector.java
    lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetMatch.java
    lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetRecog_2022.java
    lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetRecog_UTF8.java
    lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetRecog_Unicode.java
    lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetRecog_mbcs.java
    lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetRecog_sbcs.java
    lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetRecognizer.java
Modified:
    lucene/tika/trunk/CHANGES.txt
    lucene/tika/trunk/tika-app/src/main/resources/META-INF/LICENSE.txt
    lucene/tika/trunk/tika-app/src/main/resources/META-INF/NOTICE.txt
    lucene/tika/trunk/tika-parsers/pom.xml
    lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/TXTParser.java
    lucene/tika/trunk/tika-parsers/src/main/resources/META-INF/LICENSE.txt
    lucene/tika/trunk/tika-parsers/src/main/resources/META-INF/NOTICE.txt

Modified: lucene/tika/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/tika/trunk/CHANGES.txt?rev=777643&r1=777642&r2=777643&view=diff
==============================================================================
--- lucene/tika/trunk/CHANGES.txt (original)
+++ lucene/tika/trunk/CHANGES.txt Fri May 22 18:21:59 2009
@@ -18,7 +18,10 @@
     parsing task. (TIKA-215)
 
   * Automatic type detection of text- and XML-based documents has been
-    improved. (TIKA-225) 
+    improved. (TIKA-225)
+
+  * Charset detection functionality from the ICU4J library was inlined
+    in Tika to avoid the dependency on the large ICU4J jar. (TIKA-229)
 
 Release 0.3 - 03/09/2009
 ------------------------

Modified: lucene/tika/trunk/tika-app/src/main/resources/META-INF/LICENSE.txt
URL: http://svn.apache.org/viewvc/lucene/tika/trunk/tika-app/src/main/resources/META-INF/LICENSE.txt?rev=777643&r1=777642&r2=777643&view=diff
==============================================================================
--- lucene/tika/trunk/tika-app/src/main/resources/META-INF/LICENSE.txt (original)
+++ lucene/tika/trunk/tika-app/src/main/resources/META-INF/LICENSE.txt Fri May 22 18:21:59 2009
@@ -291,9 +291,9 @@
     OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
     OF SUCH DAMAGE.
 
-ICU4J library (icu4j)
+Charset detection code from ICU4J (http://site.icu-project.org/)
 
-    Copyright (c) 1995-2005 International Business Machines Corporation
+    Copyright (c) 1995-2009 International Business Machines Corporation
     and others
 
     All rights reserved.

Modified: lucene/tika/trunk/tika-app/src/main/resources/META-INF/NOTICE.txt
URL: http://svn.apache.org/viewvc/lucene/tika/trunk/tika-app/src/main/resources/META-INF/NOTICE.txt?rev=777643&r1=777642&r2=777643&view=diff
==============================================================================
--- lucene/tika/trunk/tika-app/src/main/resources/META-INF/NOTICE.txt (original)
+++ lucene/tika/trunk/tika-app/src/main/resources/META-INF/NOTICE.txt Fri May 22 18:21:59 2009
@@ -13,7 +13,7 @@
 
 Copyright (c) 2003-2005, www.fontbox.org
 
-Copyright (c) 1995-2005 International Business Machines Corporation and others
+Copyright (c) 1995-2009 International Business Machines Corporation and others
 
 Copyright (c) 2000-2005 INRIA, France Telecom
 

Modified: lucene/tika/trunk/tika-parsers/pom.xml
URL: http://svn.apache.org/viewvc/lucene/tika/trunk/tika-parsers/pom.xml?rev=777643&r1=777642&r2=777643&view=diff
==============================================================================
--- lucene/tika/trunk/tika-parsers/pom.xml (original)
+++ lucene/tika/trunk/tika-parsers/pom.xml Fri May 22 18:21:59 2009
@@ -82,11 +82,6 @@
       <version>1.9.9</version>
     </dependency>
     <dependency>
-      <groupId>com.ibm.icu</groupId>
-      <artifactId>icu4j</artifactId>
-      <version>3.8</version>
-    </dependency>
-    <dependency>
       <groupId>asm</groupId>
       <artifactId>asm</artifactId>
       <version>3.1</version>

Added: lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetDetector.java
URL: http://svn.apache.org/viewvc/lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetDetector.java?rev=777643&view=auto
==============================================================================
--- lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetDetector.java (added)
+++ lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetDetector.java Fri May 22 18:21:59 2009
@@ -0,0 +1,525 @@
+/**
+*******************************************************************************
+* Copyright (C) 2005-2009, International Business Machines Corporation and    *
+* others. All Rights Reserved.                                                *
+*******************************************************************************
+*/
+package org.apache.tika.parser.txt;
+
+import java.io.InputStream;
+import java.io.Reader;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Arrays;
+
+
+/**
+ * <code>CharsetDetector</code> provides a facility for detecting the
+ * charset or encoding of character data in an unknown format.
+ * The input data can either be from an input stream or an array of bytes.
+ * The result of the detection operation is a list of possibly matching
+ * charsets, or, for simple use, you can just ask for a Java Reader that
+ * will work over the input data.
+ * <p/>
+ * Character set detection is at best an imprecise operation.  The detection
+ * process will attempt to identify the charset that best matches the characteristics
+ * of the byte data, but the process is partly statistical in nature, and
+ * the results can not be guaranteed to always be correct.
+ * <p/>
+ * For best accuracy in charset detection, the input data should be primarily
+ * in a single language, and a minimum of a few hundred bytes worth of plain text
+ * in the language are needed.  The detection process will attempt to
+ * ignore html or xml style markup that could otherwise obscure the content.
+ * <p/>
+ * @stable ICU 3.4
+ */
+public class CharsetDetector {
+
+//   Question: Should we have getters corresponding to the setters for input text
+//   and declared encoding?
+
+//   A thought: If we were to create our own type of Java Reader, we could defer
+//   figuring out an actual charset for data that starts out with too much English
+//   only ASCII until the user actually read through to something that didn't look
+//   like 7 bit English.  If  nothing else ever appeared, we would never need to
+//   actually choose the "real" charset.  All assuming that the application just
+//   wants the data, and doesn't care about a char set name.
+
+    /**
+     *   Constructor
+     * 
+     * @stable ICU 3.4
+     */
+    public CharsetDetector() {
+    }
+
+    /**
+     * Set the declared encoding for charset detection.
+     *  The declared encoding of an input text is an encoding obtained
+     *  from an http header or xml declaration or similar source that
+     *  can be provided as additional information to the charset detector.  
+     *  A match between a declared encoding and a possible detected encoding
+     *  will raise the quality of that detected encoding by a small delta,
+     *  and will also appear as a "reason" for the match.
+     * <p/>
+     * A declared encoding that is incompatible with the input data being
+     * analyzed will not be added to the list of possible encodings.
+     * 
+     *  @param encoding The declared encoding 
+     *
+     * @stable ICU 3.4
+     */
+    public CharsetDetector setDeclaredEncoding(String encoding) {
+        fDeclaredEncoding = encoding;
+        return this;
+    }
+    
+    /**
+     * Set the input text (byte) data whose charset is to be detected.
+     * 
+     * @param in the input text of unknown encoding
+     * 
+     * @return This CharsetDetector
+     *
+     * @stable ICU 3.4
+     */
+    public CharsetDetector setText(byte [] in) {
+        fRawInput  = in;
+        fRawLength = in.length;
+        
+        MungeInput();
+        
+        return this;
+    }
+    
+    private static final int kBufSize = 8000;
+
+    /**
+     * Set the input text (byte) data whose charset is to be detected.
+     *  <p/>
+     *   The input stream that supplies the character data must have markSupported()
+     *   == true; the charset detection process will read a small amount of data,
+     *   then return the stream to its original position via
+     *   the InputStream.reset() operation.  The exact amount that will
+     *   be read depends on the characteristics of the data itself.
+     *
+     * @param in the input text of unknown encoding
+     * 
+     * @return This CharsetDetector
+     *
+     * @stable ICU 3.4
+     */
+    
+    public CharsetDetector setText(InputStream in) throws IOException {
+        fInputStream = in;
+        fInputStream.mark(kBufSize);
+        fRawInput = new byte[kBufSize];   // Always make a new buffer because the
+                                          //   previous one may have come from the caller,
+                                          //   in which case we can't touch it.
+        fRawLength = 0;
+        int remainingLength = kBufSize;
+        while (remainingLength > 0 ) {
+            // read() may give data in smallish chunks, esp. for remote sources.  Hence, this loop.
+            int  bytesRead = fInputStream.read(fRawInput, fRawLength, remainingLength);
+            if (bytesRead <= 0) {
+                 break;
+            }
+            fRawLength += bytesRead;
+            remainingLength -= bytesRead;
+        }
+        fInputStream.reset();
+        
+        MungeInput();                     // Strip html markup, collect byte stats.
+        return this;
+    }
+
+  
+    /**
+     * Return the charset that best matches the supplied input data.
+     * 
+     * Note though, that because the detection 
+     * only looks at the start of the input data,
+     * there is a possibility that the returned charset will fail to handle
+     * the full set of input data.
+     * <p/>
+     * Raise an exception if 
+     *  <ul>
+     *    <li>no charset appears to match the data.</li>
+     *    <li>no input text has been provided</li>
+     *  </ul>
+     *
+     * @return a CharsetMatch object representing the best matching charset, or
+     *         <code>null</code> if there are no matches.
+     *
+     * @stable ICU 3.4
+     */
+    public CharsetMatch detect() {
+//   TODO:  A better implementation would be to copy the detect loop from
+//          detectAll(), and cut it short as soon as a match with a high confidence
+//          is found.  This is something to be done later, after things are otherwise
+//          working.
+        CharsetMatch matches[] = detectAll();
+        
+        if (matches == null || matches.length == 0) {
+            return null;
+        }
+        
+        return matches[0];
+     }
+    
+    /**
+     *  Return an array of all charsets that appear to be plausible
+     *  matches with the input data.  The array is ordered with the
+     *  best quality match first.
+     * <p/>
+     * Raise an exception if 
+     *  <ul>
+     *    <li>no charsets appear to match the input data.</li>
+     *    <li>no input text has been provided</li>
+     *  </ul>
+     * 
+     * @return An array of CharsetMatch objects representing possibly matching charsets.
+     *
+     * @stable ICU 3.4
+     */
+    public CharsetMatch[] detectAll() {
+        CharsetRecognizer csr;
+        int               i;
+        int               detectResults;
+        int               confidence;
+        ArrayList         matches = new ArrayList();
+        
+        //  Iterate over all possible charsets, remember all that
+        //    give a match quality > 0.
+        for (i=0; i<fCSRecognizers.size(); i++) {
+            csr = (CharsetRecognizer)fCSRecognizers.get(i);
+            detectResults = csr.match(this);
+            confidence = detectResults & 0x000000ff;
+            if (confidence > 0) {
+                CharsetMatch  m = new CharsetMatch(this, csr, confidence);
+                matches.add(m);
+            }
+        }
+        Collections.sort(matches);      // CharsetMatch compares on confidence
+        Collections.reverse(matches);   //  Put best match first.
+        CharsetMatch [] resultArray = new CharsetMatch[matches.size()];
+        resultArray = (CharsetMatch[]) matches.toArray(resultArray);
+        return resultArray;
+    }
+
+    
+    /**
+     * Autodetect the charset of an inputStream, and return a Java Reader
+     * to access the converted input data.
+     * <p/>
+     * This is a convenience method that is equivalent to
+     *   <code>this.setDeclaredEncoding(declaredEncoding).setText(in).detect().getReader();</code>
+     * <p/>
+     *   For the input stream that supplies the character data, markSupported()
+     *   must be true; the  charset detection will read a small amount of data,
+     *   then return the stream to its original position via
+     *   the InputStream.reset() operation.  The exact amount that will
+     *    be read depends on the characteristics of the data itself.
+     *<p/>
+     * Raise an exception if no charsets appear to match the input data.
+     * 
+     * @param in The source of the byte data in the unknown charset.
+     *
+     * @param declaredEncoding  A declared encoding for the data, if available,
+     *           or null or an empty string if none is available.
+     *
+     * @stable ICU 3.4
+     */
+    public Reader getReader(InputStream in, String declaredEncoding) {
+        fDeclaredEncoding = declaredEncoding;
+        
+        try {
+            setText(in);
+            
+            CharsetMatch match = detect();
+            
+            if (match == null) {
+                return null;
+            }
+            
+            return match.getReader();
+        } catch (IOException e) {
+            return null;
+        }
+    }
+
+    /**
+     * Autodetect the charset of an inputStream, and return a String
+     * containing the converted input data.
+     * <p/>
+     * This is a convenience method that is equivalent to
+     *   <code>this.setDeclaredEncoding(declaredEncoding).setText(in).detect().getString();</code>
+     *<p/>
+     * Raise an exception if no charsets appear to match the input data.
+     * 
+     * @param in The source of the byte data in the unknown charset.
+     *
+     * @param declaredEncoding  A declared encoding for the data, if available,
+     *           or null or an empty string if none is available.
+     *
+     * @stable ICU 3.4
+     */
+    public String getString(byte[] in, String declaredEncoding)
+    {
+        fDeclaredEncoding = declaredEncoding;
+       
+        try {
+            setText(in);
+            
+            CharsetMatch match = detect();
+            
+            if (match == null) {
+                return null;
+            }
+            
+            return match.getString(-1);
+        } catch (IOException e) {
+            return null;
+        }
+    }
+
+ 
+    /**
+     * Get the names of all char sets that can be recognized by the char set detector.
+     *
+     * @return an array of the names of all charsets that can be recognized
+     * by the charset detector.
+     *
+     * @stable ICU 3.4
+     */
+    public static String[] getAllDetectableCharsets() {
+        return fCharsetNames;
+    }
+    
+    /**
+     * Test whether or not input filtering is enabled.
+     * 
+     * @return <code>true</code> if input text will be filtered.
+     * 
+     * @see #enableInputFilter
+     *
+     * @stable ICU 3.4
+     */
+    public boolean inputFilterEnabled()
+    {
+        return fStripTags;
+    }
+    
+    /**
+     * Enable filtering of input text. If filtering is enabled,
+     * text within angle brackets ("<" and ">") will be removed
+     * before detection.
+     * 
+     * @param filter <code>true</code> to enable input text filtering.
+     * 
+     * @return The previous setting.
+     *
+     * @stable ICU 3.4
+     */
+    public boolean enableInputFilter(boolean filter)
+    {
+        boolean previous = fStripTags;
+        
+        fStripTags = filter;
+        
+        return previous;
+    }
+    
+    /*
+     *  MungeInput - after getting a set of raw input data to be analyzed, preprocess
+     *               it by removing what appears to be html markup.
+     */
+    private void MungeInput() {
+        int srci = 0;
+        int dsti = 0;
+        byte b;
+        boolean  inMarkup = false;
+        int      openTags = 0;
+        int      badTags  = 0;
+        
+        //
+        //  html / xml markup stripping.
+        //     quick and dirty, not 100% accurate, but hopefully good enough, statistically.
+        //     discard everything within < brackets >
+        //     Count how many total '<' and illegal (nested) '<' occur, so we can make some
+        //     guess as to whether the input was actually marked up at all.
+        if (fStripTags) {
+            for (srci = 0; srci < fRawLength && dsti < fInputBytes.length; srci++) {
+                b = fRawInput[srci];
+                if (b == (byte)'<') {
+                    if (inMarkup) {
+                        badTags++;
+                    }
+                    inMarkup = true;
+                    openTags++;
+                }
+                
+                if (! inMarkup) {
+                    fInputBytes[dsti++] = b;
+                }
+                
+                if (b == (byte)'>') {
+                    inMarkup = false;
+                }        
+            }
+            
+            fInputLen = dsti;
+        }
+        
+        //
+        //  If it looks like this input wasn't marked up, or if it looks like it's
+        //    essentially nothing but markup abandon the markup stripping.
+        //    Detection will have to work on the unstripped input.
+        //
+        if (openTags<5 || openTags/5 < badTags || 
+                (fInputLen < 100 && fRawLength>600)) {
+            int limit = fRawLength;
+            
+            if (limit > kBufSize) {
+                limit = kBufSize;
+            }
+            
+            for (srci=0; srci<limit; srci++) {
+                fInputBytes[srci] = fRawInput[srci];
+            }
+            fInputLen = srci;
+        }
+        
+        //
+        // Tally up the byte occurrence statistics.
+        //   These are available for use by the various detectors.
+        //
+        Arrays.fill(fByteStats, (short)0);
+        for (srci=0; srci<fInputLen; srci++) {
+            int val = fInputBytes[srci] & 0x00ff;
+            fByteStats[val]++;
+        }
+        
+        fC1Bytes = false;
+        for (int i = 0x80; i <= 0x9F; i += 1) {
+            if (fByteStats[i] != 0) {
+                fC1Bytes = true;
+                break;
+            }
+        }
+     }
+
+    /*
+     *  The following items are accessed by individual CharsetRecognizers during
+     *     the recognition process
+     * 
+     */
+    byte[]      fInputBytes =       // The text to be checked.  Markup will have been
+                   new byte[kBufSize];  //   removed if appropriate.
+    
+    int         fInputLen;          // Length of the byte data in fInputBytes.
+    
+    short       fByteStats[] =      // byte frequency statistics for the input text.
+                   new short[256];  //   Value is percent, not absolute.
+                                    //   Value is rounded up, so zero really means zero occurrences.
+    
+    boolean     fC1Bytes =          // True if any bytes in the range 0x80 - 0x9F are in the input;
+                   false;
+    
+    String      fDeclaredEncoding;
+    
+    
+
+    //
+    //  Stuff private to CharsetDetector
+    //
+    byte[]               fRawInput;     // Original, untouched input bytes.
+                                        //  If user gave us a byte array, this is it.
+                                        //  If user gave us a stream, it's read to a 
+                                        //  buffer here.
+    int                  fRawLength;    // Length of data in fRawInput array.
+    
+    InputStream          fInputStream;  // User's input stream, or null if the user
+                                        //   gave us a byte array.
+     
+    boolean              fStripTags =   // If true, setText() will strip tags from input text.
+                           false;
+    
+    
+    /*
+     * List of recognizers for all charsets known to the implementation.
+     */
+    private static ArrayList fCSRecognizers = createRecognizers();
+    private static String [] fCharsetNames;
+    
+    /*
+     * Create the singleton instances of the CharsetRecognizer classes
+     */
+    private static ArrayList createRecognizers() {
+        ArrayList recognizers = new ArrayList();
+        
+        recognizers.add(new CharsetRecog_UTF8());
+        
+        recognizers.add(new CharsetRecog_Unicode.CharsetRecog_UTF_16_BE());
+        recognizers.add(new CharsetRecog_Unicode.CharsetRecog_UTF_16_LE());
+        recognizers.add(new CharsetRecog_Unicode.CharsetRecog_UTF_32_BE());
+        recognizers.add(new CharsetRecog_Unicode.CharsetRecog_UTF_32_LE());
+        
+        recognizers.add(new CharsetRecog_mbcs.CharsetRecog_sjis());
+        recognizers.add(new CharsetRecog_2022.CharsetRecog_2022JP());
+        recognizers.add(new CharsetRecog_2022.CharsetRecog_2022CN());
+        recognizers.add(new CharsetRecog_2022.CharsetRecog_2022KR());
+        recognizers.add(new CharsetRecog_mbcs.CharsetRecog_euc.CharsetRecog_gb_18030());
+        recognizers.add(new CharsetRecog_mbcs.CharsetRecog_euc.CharsetRecog_euc_jp());
+        recognizers.add(new CharsetRecog_mbcs.CharsetRecog_euc.CharsetRecog_euc_kr());
+        recognizers.add(new CharsetRecog_mbcs.CharsetRecog_big5());
+        
+        recognizers.add(new CharsetRecog_sbcs.CharsetRecog_8859_1_da());
+        recognizers.add(new CharsetRecog_sbcs.CharsetRecog_8859_1_de());
+        recognizers.add(new CharsetRecog_sbcs.CharsetRecog_8859_1_en());
+        recognizers.add(new CharsetRecog_sbcs.CharsetRecog_8859_1_es());
+        recognizers.add(new CharsetRecog_sbcs.CharsetRecog_8859_1_fr());
+        recognizers.add(new CharsetRecog_sbcs.CharsetRecog_8859_1_it());
+        recognizers.add(new CharsetRecog_sbcs.CharsetRecog_8859_1_nl());
+        recognizers.add(new CharsetRecog_sbcs.CharsetRecog_8859_1_no());
+        recognizers.add(new CharsetRecog_sbcs.CharsetRecog_8859_1_pt());
+        recognizers.add(new CharsetRecog_sbcs.CharsetRecog_8859_1_sv());
+        recognizers.add(new CharsetRecog_sbcs.CharsetRecog_8859_2_cs());
+        recognizers.add(new CharsetRecog_sbcs.CharsetRecog_8859_2_hu());
+        recognizers.add(new CharsetRecog_sbcs.CharsetRecog_8859_2_pl());
+        recognizers.add(new CharsetRecog_sbcs.CharsetRecog_8859_2_ro());
+        recognizers.add(new CharsetRecog_sbcs.CharsetRecog_8859_5_ru());
+        recognizers.add(new CharsetRecog_sbcs.CharsetRecog_8859_6_ar());
+        recognizers.add(new CharsetRecog_sbcs.CharsetRecog_8859_7_el());
+        recognizers.add(new CharsetRecog_sbcs.CharsetRecog_8859_8_I_he());
+        recognizers.add(new CharsetRecog_sbcs.CharsetRecog_8859_8_he());
+        recognizers.add(new CharsetRecog_sbcs.CharsetRecog_windows_1251());
+        recognizers.add(new CharsetRecog_sbcs.CharsetRecog_windows_1256());
+        recognizers.add(new CharsetRecog_sbcs.CharsetRecog_KOI8_R());
+        recognizers.add(new CharsetRecog_sbcs.CharsetRecog_8859_9_tr());
+        
+        recognizers.add(new CharsetRecog_sbcs.CharsetRecog_IBM424_he_rtl());
+        recognizers.add(new CharsetRecog_sbcs.CharsetRecog_IBM424_he_ltr());
+        recognizers.add(new CharsetRecog_sbcs.CharsetRecog_IBM420_ar_rtl());
+        recognizers.add(new CharsetRecog_sbcs.CharsetRecog_IBM420_ar_ltr());
+        
+        // Create an array of all charset names, as a side effect.
+        // Needed for the getAllDetectableCharsets() API.
+        String[] charsetNames = new String [recognizers.size()];
+        int out = 0;
+        
+        for (int i = 0; i < recognizers.size(); i++) {
+            String name = ((CharsetRecognizer)recognizers.get(i)).getName();
+            
+            if (out == 0 || ! name.equals(charsetNames[out - 1])) {
+                charsetNames[out++] = name;
+            }
+        }
+        
+        fCharsetNames = new String[out];
+        System.arraycopy(charsetNames, 0, fCharsetNames, 0, out);
+        
+        return recognizers;
+    }
+}

Added: lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetMatch.java
URL: http://svn.apache.org/viewvc/lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetMatch.java?rev=777643&view=auto
==============================================================================
--- lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetMatch.java (added)
+++ lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetMatch.java Fri May 22 18:21:59 2009
@@ -0,0 +1,263 @@
+/**
+*******************************************************************************
+* Copyright (C) 2005-2007, International Business Machines Corporation and    *
+* others. All Rights Reserved.                                                *
+*******************************************************************************
+*/
+package org.apache.tika.parser.txt;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.Reader;
+
+
+/**
+ * This class represents a charset that has been identified by a CharsetDetector
+ * as a possible encoding for a set of input data.  From an instance of this
+ * class, you can ask for a confidence level in the charset identification,
+ * or for Java Reader or String to access the original byte data in Unicode form.
+ * <p/>
+ * Instances of this class are created only by CharsetDetectors.
+ * <p/>
+ * Note:  this class has a natural ordering that is inconsistent with equals.
+ *        The natural ordering is based on the match confidence value.
+ *
+ * @stable ICU 3.4
+ */
+public class CharsetMatch implements Comparable {
+
+    
+    /**
+     * Create a java.io.Reader for reading the Unicode character data corresponding
+     * to the original byte data supplied to the Charset detect operation.
+     * <p/>
+     * CAUTION:  if the source of the byte data was an InputStream, a Reader
+     * can be created for only one matching char set using this method.  If more 
+     * than one charset needs to be tried, the caller will need to reset
+     * the InputStream and create InputStreamReaders itself, based on the charset name.
+     *
+     * @return the Reader for the Unicode character data.
+     *
+     * @stable ICU 3.4
+     */
+    public Reader getReader() {
+        InputStream inputStream = fInputStream;
+        
+        if (inputStream == null) {
+            inputStream = new ByteArrayInputStream(fRawInput, 0, fRawLength);
+        }
+        
+        try {
+            inputStream.reset();
+            return new InputStreamReader(inputStream, getName());
+        } catch (IOException e) {
+            return null;
+        }
+    }
+
+    /**
+     * Create a Java String from Unicode character data corresponding
+     * to the original byte data supplied to the Charset detect operation.
+     *
+     * @return a String created from the converted input data.
+     *
+     * @stable ICU 3.4
+     */
+    public String getString()  throws java.io.IOException {
+        return getString(-1);
+
+    }
+
+    /**
+     * Create a Java String from Unicode character data corresponding
+     * to the original byte data supplied to the Charset detect operation.
+     * The length of the returned string is limited to the specified size;
+     * the string will be trunctated to this length if necessary.  A limit value of
+     * zero or less is ignored, and treated as no limit.
+     *
+     * @param maxLength The maximum length of the String to be created when the
+     *                  source of the data is an input stream, or -1 for
+     *                  unlimited length.
+     * @return a String created from the converted input data.
+     *
+     * @stable ICU 3.4
+     */
+    public String getString(int maxLength) throws java.io.IOException {
+        String result = null;
+        if (fInputStream != null) {
+            StringBuffer sb = new StringBuffer();
+            char[] buffer = new char[1024];
+            Reader reader = getReader();
+            int max = maxLength < 0? Integer.MAX_VALUE : maxLength;
+            int bytesRead = 0;
+            
+            while ((bytesRead = reader.read(buffer, 0, Math.min(max, 1024))) >= 0) {
+                sb.append(buffer, 0, bytesRead);
+                max -= bytesRead;
+            }
+            
+            reader.close();
+            
+            return sb.toString();
+        } else {
+            result = new String(fRawInput, getName());            
+        }
+        return result;
+
+    }
+    
+    /**
+     * Get an indication of the confidence in the charset detected.
+     * Confidence values range from 0-100, with larger numbers indicating
+     * a better match of the input data to the characteristics of the
+     * charset.
+     *
+     * @return the confidence in the charset match
+     *
+     * @stable ICU 3.4
+     */
+    public int getConfidence() {
+        return fConfidence;
+    }
+    
+
+    /**
+     * Bit flag indicating the match is based on the encoding scheme.
+     *
+     * @see #getMatchType
+     * @stable ICU 3.4
+     */
+    static public final int ENCODING_SCHEME    = 1;
+    
+    /**
+     * Bit flag indicating the match is based on the presence of a BOM.
+     * 
+     * @see #getMatchType
+     * @stable ICU 3.4
+     */
+    static public final int BOM                = 2;
+    
+    /**
+     * Bit flag indicating the match is based on the declared encoding.
+     * 
+     * @see #getMatchType
+     * @stable ICU 3.4
+     */
+    static public final int DECLARED_ENCODING  = 4;
+    
+    /**
+     * Bit flag indicating the match is based on language statistics.
+     *
+     * @see #getMatchType
+     * @stable ICU 3.4
+     */
+    static public final int LANG_STATISTICS    = 8;
+    
+    /**
+     * Return flags indicating what it was about the input data 
+     * that caused this charset to be considered as a possible match.
+     * The result is a bitfield containing zero or more of the flags
+     * ENCODING_SCHEME, BOM, DECLARED_ENCODING, and LANG_STATISTICS.
+     * A result of zero means no information is available.
+     * <p>
+     * Note: currently, this method always returns zero.
+     * <p>
+     *
+     * @return the type of match found for this charset.
+     *
+     * @draft ICU 3.4
+     * @provisional This API might change or be removed in a future release.
+     */
+    public int getMatchType() {
+//      TODO: create a list of enum-like constants for common combinations of types of matches.
+        return 0;
+    }
+
+    /**
+     * Get the name of the detected charset.  
+     * The name will be one that can be used with other APIs on the
+     * platform that accept charset names.  It is the "Canonical name"
+     * as defined by the class java.nio.charset.Charset; for
+     * charsets that are registered with the IANA charset registry,
+     * this is the MIME-preferred registered name.
+     *
+     * @see java.nio.charset.Charset
+     * @see java.io.InputStreamReader
+     *
+     * @return The name of the charset.
+     *
+     * @stable ICU 3.4
+     */
+    public String getName() {
+        return fRecognizer.getName();
+    }
+    
+    /**
+     * Get the ISO code for the language of the detected charset.  
+     *
+     * @return The ISO code for the language or <code>null</code> if the language cannot be determined.
+     *
+     * @stable ICU 3.4
+     */
+    public String getLanguage() {
+        return fRecognizer.getLanguage();
+    }
+
+    /**
+     * Compare to other CharsetMatch objects.
+     * Comparison is based on the match confidence value, which 
+     *   allows CharsetDetector.detectAll() to order its results. 
+     *
+     * @param o the CharsetMatch object to compare against.
+     * @return  a negative integer, zero, or a positive integer as the 
+     *          confidence level of this CharsetMatch
+     *          is less than, equal to, or greater than that of
+     *          the argument.
+     * @throws ClassCastException if the argument is not a CharsetMatch.
+     * @stable ICU 3.4
+     */
+    public int compareTo (Object o) {
+        CharsetMatch other = (CharsetMatch)o;
+        int compareResult = 0;
+        if (this.fConfidence > other.fConfidence) {
+            compareResult = 1;
+        } else if (this.fConfidence < other.fConfidence) {
+            compareResult = -1;
+        }
+        return compareResult;
+    }
+    
+    /*
+     *  Constructor.  Implementation internal
+     */
+    CharsetMatch(CharsetDetector det, CharsetRecognizer rec, int conf) {
+        fRecognizer = rec;
+        fConfidence = conf;
+        
+        // The references to the original aplication input data must be copied out
+        //   of the charset recognizer to here, in case the application resets the
+        //   recognizer before using this CharsetMatch.
+        if (det.fInputStream == null) {
+            // We only want the existing input byte data if it came straight from the user,
+            //   not if is just the head of a stream.
+            fRawInput    = det.fRawInput;
+            fRawLength   = det.fRawLength;
+        }
+        fInputStream = det.fInputStream;
+    }
+
+    
+    //
+    //   Private Data
+    //
+    private int                 fConfidence;
+    private CharsetRecognizer   fRecognizer;
+    private byte[]              fRawInput = null;     // Original, untouched input bytes.
+                                                      //  If user gave us a byte array, this is it.
+    private int                 fRawLength;           // Length of data in fRawInput array.
+
+    private InputStream         fInputStream = null;  // User's input stream, or null if the user
+                                                      //   gave us a byte array.
+}

Added: lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetRecog_2022.java
URL: http://svn.apache.org/viewvc/lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetRecog_2022.java?rev=777643&view=auto
==============================================================================
--- lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetRecog_2022.java (added)
+++ lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetRecog_2022.java Fri May 22 18:21:59 2009
@@ -0,0 +1,166 @@
+/*
+*******************************************************************************
+* Copyright (C) 2005 - 2008, International Business Machines Corporation and  *
+* others. All Rights Reserved.                                                *
+*******************************************************************************
+*/
+package org.apache.tika.parser.txt;
+
+/**
+ *  class CharsetRecog_2022  part of the ICU charset detection implementation.
+ *                           This is a superclass for the individual detectors for
+ *                           each of the detectable members of the ISO 2022 family
+ *                           of encodings.
+ * 
+ *                           The separate classes are nested within this class.
+ * 
+ * @internal
+ */
+abstract class CharsetRecog_2022 extends CharsetRecognizer {
+
+    
+    /**
+     * Matching function shared among the 2022 detectors JP, CN and KR
+     * Counts up the number of legal and unrecognized escape sequences in
+     * the sample of text, and computes a score based on the total number &
+     * the proportion that fit the encoding.
+     * 
+     * 
+     * @param text the byte buffer containing text to analyse
+     * @param textLen  the size of the text in the byte.
+     * @param escapeSequences the byte escape sequences to test for.
+     * @return match quality, in the range of 0-100.
+     */
+    int   match(byte [] text, int textLen, byte [][] escapeSequences) {
+        int     i, j;
+        int     escN;
+        int     hits   = 0;
+        int     misses = 0;
+        int     shifts = 0;
+        int     quality;
+        scanInput:
+            for (i=0; i<textLen; i++) {
+                if (text[i] == 0x1b) {
+                    checkEscapes:
+                        for (escN=0; escN<escapeSequences.length; escN++) {
+                            byte [] seq = escapeSequences[escN];
+                            
+                            if ((textLen - i) < seq.length) {
+                                continue checkEscapes;
+                            }
+                            
+                            for (j=1; j<seq.length; j++) {
+                                if (seq[j] != text[i+j])  {
+                                    continue checkEscapes;
+                                }                                   
+                            }
+                            
+                            hits++; 
+                            i += seq.length-1;
+                            continue scanInput;
+                        }
+                
+                        misses++;                  
+                }
+                
+                if (text[i] == 0x0e || text[i] == 0x0f) {
+                    // Shift in/out
+                    shifts++;
+                }
+            }
+        
+        if (hits == 0) {
+            return 0;
+        }
+        
+        //
+        // Initial quality is based on relative proportion of recognized vs.
+        //   unrecognized escape sequences. 
+        //   All good:  quality = 100;
+        //   half or less good: quality = 0;
+        //   linear in between.
+        quality = (100*hits - 100*misses) / (hits + misses);
+        
+        // Back off quality if there were too few escape sequences seen.
+        //   Include shifts in this computation, so that KR does not get penalized
+        //   for having only a single Escape sequence, but many shifts.
+        if (hits+shifts < 5) {
+            quality -= (5-(hits+shifts))*10;
+        }
+        
+        if (quality < 0) {
+            quality = 0;
+        }        
+        return quality;
+    }
+
+    
+ 
+    
+    static class CharsetRecog_2022JP extends CharsetRecog_2022 {
+        private byte [] [] escapeSequences = {
+                {0x1b, 0x24, 0x28, 0x43},   // KS X 1001:1992
+                {0x1b, 0x24, 0x28, 0x44},   // JIS X 212-1990
+                {0x1b, 0x24, 0x40},         // JIS C 6226-1978
+                {0x1b, 0x24, 0x41},         // GB 2312-80
+                {0x1b, 0x24, 0x42},         // JIS X 208-1983
+                {0x1b, 0x26, 0x40},         // JIS X 208 1990, 1997
+                {0x1b, 0x28, 0x42},         // ASCII
+                {0x1b, 0x28, 0x48},         // JIS-Roman
+                {0x1b, 0x28, 0x49},         // Half-width katakana
+                {0x1b, 0x28, 0x4a},         // JIS-Roman
+                {0x1b, 0x2e, 0x41},         // ISO 8859-1
+                {0x1b, 0x2e, 0x46}          // ISO 8859-7
+                };
+        
+        String getName() {
+            return "ISO-2022-JP";
+        }
+        
+        int   match(CharsetDetector det) {
+            return match(det.fInputBytes, det.fInputLen, escapeSequences);
+        }
+    }
+
+    static class CharsetRecog_2022KR extends CharsetRecog_2022 {
+        private byte [] [] escapeSequences = {
+                {0x1b, 0x24, 0x29, 0x43}   
+                 };
+        
+        String getName() {
+            return "ISO-2022-KR";
+        }
+        
+        int   match(CharsetDetector det) {
+            return match(det.fInputBytes, det.fInputLen, escapeSequences);
+        }
+        
+    }
+
+    static class CharsetRecog_2022CN extends CharsetRecog_2022 {
+        private byte [] [] escapeSequences = {
+                {0x1b, 0x24, 0x29, 0x41},   // GB 2312-80
+                {0x1b, 0x24, 0x29, 0x47},   // CNS 11643-1992 Plane 1
+                {0x1b, 0x24, 0x2A, 0x48},   // CNS 11643-1992 Plane 2
+                {0x1b, 0x24, 0x29, 0x45},   // ISO-IR-165
+                {0x1b, 0x24, 0x2B, 0x49},   // CNS 11643-1992 Plane 3
+                {0x1b, 0x24, 0x2B, 0x4A},   // CNS 11643-1992 Plane 4
+                {0x1b, 0x24, 0x2B, 0x4B},   // CNS 11643-1992 Plane 5
+                {0x1b, 0x24, 0x2B, 0x4C},   // CNS 11643-1992 Plane 6
+                {0x1b, 0x24, 0x2B, 0x4D},   // CNS 11643-1992 Plane 7
+                {0x1b, 0x4e},               // SS2
+                {0x1b, 0x4f},               // SS3
+        };
+        
+        String getName() {
+            return "ISO-2022-CN";
+        }
+        
+        
+        int   match(CharsetDetector det) {
+            return match(det.fInputBytes, det.fInputLen, escapeSequences);
+        }
+    }
+    
+    }
+

Added: lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetRecog_UTF8.java
URL: http://svn.apache.org/viewvc/lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetRecog_UTF8.java?rev=777643&view=auto
==============================================================================
--- lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetRecog_UTF8.java (added)
+++ lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetRecog_UTF8.java Fri May 22 18:21:59 2009
@@ -0,0 +1,99 @@
+/**
+*******************************************************************************
+* Copyright (C) 2005 - 2007, International Business Machines Corporation and  *
+* others. All Rights Reserved.                                                *
+*******************************************************************************
+*/
+package org.apache.tika.parser.txt;
+
+/**
+ * Charset recognizer for UTF-8
+ *
+ * @internal
+ */
+class CharsetRecog_UTF8 extends CharsetRecognizer {
+
+    String getName() {
+        return "UTF-8";
+    }
+
+    /* (non-Javadoc)
+     * @see com.ibm.icu.text.CharsetRecognizer#match(com.ibm.icu.text.CharsetDetector)
+     */
+    int match(CharsetDetector det) {
+        boolean     hasBOM = false;
+        int         numValid = 0;
+        int         numInvalid = 0;
+        byte        input[] = det.fRawInput;
+        int         i;
+        int         trailBytes = 0;
+        int         confidence;
+        
+        if (det.fRawLength >= 3 && 
+                (input[0] & 0xFF) == 0xef && (input[1] & 0xFF) == 0xbb & (input[2] & 0xFF) == 0xbf) {
+            hasBOM = true;
+        }
+        
+        // Scan for multi-byte sequences
+        for (i=0; i<det.fRawLength; i++) {
+            int b = input[i];
+            if ((b & 0x80) == 0) {
+                continue;   // ASCII
+            }
+            
+            // Hi bit on char found.  Figure out how long the sequence should be
+            if ((b & 0x0e0) == 0x0c0) {
+                trailBytes = 1;                
+            } else if ((b & 0x0f0) == 0x0e0) {
+                trailBytes = 2;
+            } else if ((b & 0x0f8) == 0xf0) {
+                trailBytes = 3;
+            } else {
+                numInvalid++;
+                if (numInvalid > 5) {
+                    break;
+                }
+                trailBytes = 0;
+            }
+                
+            // Verify that we've got the right number of trail bytes in the sequence
+            for (;;) {
+                i++;
+                if (i>=det.fRawLength) {
+                    break;
+                }
+                b = input[i];
+                if ((b & 0xc0) != 0x080) {
+                    numInvalid++;
+                    break;
+                }
+                if (--trailBytes == 0) {
+                    numValid++;
+                    break;
+                }
+            }
+                        
+        }
+        
+        // Cook up some sort of confidence score, based on presence of a BOM
+        //    and the existence of valid and/or invalid multi-byte sequences.
+        confidence = 0;
+        if (hasBOM && numInvalid==0) {
+            confidence = 100;
+        } else if (hasBOM && numValid > numInvalid*10) {
+            confidence = 80;
+        } else if (numValid > 3 && numInvalid == 0) {
+            confidence = 100;            
+        } else if (numValid > 0 && numInvalid == 0) {
+            confidence = 80;
+        } else if (numValid == 0 && numInvalid == 0) {
+            // Plain ASCII.  
+            confidence = 10;            
+        } else if (numValid > numInvalid*10) {
+            // Probably corrupt UTF-8 data.  Valid sequences aren't likely by chance.
+            confidence = 25;
+        }
+        return confidence;
+    }
+
+}

Added: lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetRecog_Unicode.java
URL: http://svn.apache.org/viewvc/lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetRecog_Unicode.java?rev=777643&view=auto
==============================================================================
--- lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetRecog_Unicode.java (added)
+++ lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetRecog_Unicode.java Fri May 22 18:21:59 2009
@@ -0,0 +1,154 @@
+/*
+ *******************************************************************************
+ * Copyright (C) 1996-2007, International Business Machines Corporation and    *
+ * others. All Rights Reserved.                                                *
+ *******************************************************************************
+ *
+ */
+package org.apache.tika.parser.txt;
+
+/**
+ * This class matches UTF-16 and UTF-32, both big- and little-endian. The
+ * BOM will be used if it is present.
+ * 
+ * @internal
+ */
+abstract class CharsetRecog_Unicode extends CharsetRecognizer {
+
+    /* (non-Javadoc)
+     * @see com.ibm.icu.text.CharsetRecognizer#getName()
+     */
+    abstract String getName();
+
+    /* (non-Javadoc)
+     * @see com.ibm.icu.text.CharsetRecognizer#match(com.ibm.icu.text.CharsetDetector)
+     */
+    abstract int match(CharsetDetector det);
+    
+    static class CharsetRecog_UTF_16_BE extends CharsetRecog_Unicode
+    {
+        String getName()
+        {
+            return "UTF-16BE";
+        }
+        
+        int match(CharsetDetector det)
+        {
+            byte[] input = det.fRawInput;
+            
+            if (input.length>=2 && ((input[0] & 0xFF) == 0xFE && (input[1] & 0xFF) == 0xFF)) {
+                return 100;
+            }
+            
+            // TODO: Do some statistics to check for unsigned UTF-16BE
+            return 0;
+        }
+    }
+    
+    static class CharsetRecog_UTF_16_LE extends CharsetRecog_Unicode
+    {
+        String getName()
+        {
+            return "UTF-16LE";
+        }
+        
+        int match(CharsetDetector det)
+        {
+            byte[] input = det.fRawInput;
+            
+            if (input.length >= 2 && ((input[0] & 0xFF) == 0xFF && (input[1] & 0xFF) == 0xFE))
+            {
+               // An LE BOM is present.
+               if (input.length>=4 && input[2] == 0x00 && input[3] == 0x00) {
+                   // It is probably UTF-32 LE, not UTF-16
+                   return 0;
+               }
+               return 100;
+            }        
+            
+            // TODO: Do some statistics to check for unsigned UTF-16LE
+            return 0;
+        }
+    }
+    
+    static abstract class CharsetRecog_UTF_32 extends CharsetRecog_Unicode
+    {
+        abstract int getChar(byte[] input, int index);
+        
+        abstract String getName();
+        
+        int match(CharsetDetector det)
+        {
+            byte[] input   = det.fRawInput;
+            int limit      = (det.fRawLength / 4) * 4;
+            int numValid   = 0;
+            int numInvalid = 0;
+            boolean hasBOM = false;
+            int confidence = 0;
+            
+            if (limit==0) {
+                return 0;
+            }
+            if (getChar(input, 0) == 0x0000FEFF) {
+                hasBOM = true;
+            }
+            
+            for(int i = 0; i < limit; i += 4) {
+                int ch = getChar(input, i);
+                
+                if (ch < 0 || ch >= 0x10FFFF || (ch >= 0xD800 && ch <= 0xDFFF)) {
+                    numInvalid += 1;
+                } else {
+                    numValid += 1;
+                }
+            }
+            
+            
+            // Cook up some sort of confidence score, based on presence of a BOM
+            //    and the existence of valid and/or invalid multi-byte sequences.
+            if (hasBOM && numInvalid==0) {
+                confidence = 100;
+            } else if (hasBOM && numValid > numInvalid*10) {
+                confidence = 80;
+            } else if (numValid > 3 && numInvalid == 0) {
+                confidence = 100;            
+            } else if (numValid > 0 && numInvalid == 0) {
+                confidence = 80;
+            } else if (numValid > numInvalid*10) {
+                // Probably corrupt UTF-32BE data.  Valid sequences aren't likely by chance.
+                confidence = 25;
+            }
+            
+            return confidence;
+        }
+    }
+    
+    static class CharsetRecog_UTF_32_BE extends CharsetRecog_UTF_32
+    {
+        int getChar(byte[] input, int index)
+        {
+            return (input[index + 0] & 0xFF) << 24 | (input[index + 1] & 0xFF) << 16 |
+                   (input[index + 2] & 0xFF) <<  8 | (input[index + 3] & 0xFF);
+        }
+        
+        String getName()
+        {
+            return "UTF-32BE";
+        }
+    }
+
+    
+    static class CharsetRecog_UTF_32_LE extends CharsetRecog_UTF_32
+    {
+        int getChar(byte[] input, int index)
+        {
+            return (input[index + 3] & 0xFF) << 24 | (input[index + 2] & 0xFF) << 16 |
+                   (input[index + 1] & 0xFF) <<  8 | (input[index + 0] & 0xFF);
+        }
+        
+        String getName()
+        {
+            return "UTF-32LE";
+        }
+    }
+}

Added: lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetRecog_mbcs.java
URL: http://svn.apache.org/viewvc/lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetRecog_mbcs.java?rev=777643&view=auto
==============================================================================
--- lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetRecog_mbcs.java (added)
+++ lucene/tika/trunk/tika-parsers/src/main/java/org/apache/tika/parser/txt/CharsetRecog_mbcs.java Fri May 22 18:21:59 2009
@@ -0,0 +1,542 @@
+/*
+ ****************************************************************************
+ * Copyright (C) 2005-2008, International Business Machines Corporation and *
+ * others. All Rights Reserved.                                             *
+ ****************************************************************************
+ *
+ */
+package org.apache.tika.parser.txt;
+
+import java.util.Arrays;
+
+/**
+ * CharsetRecognizer implementation for Asian  - double or multi-byte - charsets.
+ *                   Match is determined mostly by the input data adhering to the
+ *                   encoding scheme for the charset, and, optionally,
+ *                   frequency-of-occurrence of characters.
+ * <p/>
+ *                   Instances of this class are singletons, one per encoding
+ *                   being recognized.  They are created in the main
+ *                   CharsetDetector class and kept in the global list of available
+ *                   encodings to be checked.  The specific encoding being recognized
+ *                   is determined by subclass.
+ * 
+ * @internal                  
+ */
+abstract class CharsetRecog_mbcs extends CharsetRecognizer {
+
+   /**
+     * Get the IANA name of this charset.
+     * @return the charset name.
+     */
+    abstract String      getName() ;
+    
+    
+    /**
+     * Test the match of this charset with the input text data
+     *      which is obtained via the CharsetDetector object.
+     * 
+     * @param det  The CharsetDetector, which contains the input text
+     *             to be checked for being in this charset.
+     * @return     Two values packed into one int  (Damn java, anyhow)
+     *             <br/>
+     *             bits 0-7:  the match confidence, ranging from 0-100
+     *             <br/>
+     *             bits 8-15: The match reason, an enum-like value.
+     */
+    int match(CharsetDetector det, int [] commonChars) {
+        int   singleByteCharCount = 0;
+        int   doubleByteCharCount = 0;
+        int   commonCharCount     = 0;
+        int   badCharCount        = 0;
+        int   totalCharCount      = 0;
+        int   confidence          = 0;
+        iteratedChar   iter       = new iteratedChar();
+        
+        detectBlock: {
+            for (iter.reset(); nextChar(iter, det);) {
+                totalCharCount++;
+                if (iter.error) {
+                    badCharCount++; 
+                } else {
+                    long cv = iter.charValue & 0xFFFFFFFFL;
+                                        
+                    if (cv <= 0xff) {
+                        singleByteCharCount++;
+                    } else {
+                        doubleByteCharCount++;
+                        if (commonChars != null) {
+                            // NOTE: This assumes that there are no 4-byte common chars.
+                            if (Arrays.binarySearch(commonChars, (int) cv) >= 0) {
+                                commonCharCount++;
+                            }
+                        }
+                    }
+                }
+                if (badCharCount >= 2 && badCharCount*5 >= doubleByteCharCount) {
+                    // Bail out early if the byte data is not matching the encoding scheme.
+                    break detectBlock;
+                }
+            }
+            
+            if (doubleByteCharCount <= 10 && badCharCount== 0) {
+                // Not many multi-byte chars.
+                if (doubleByteCharCount == 0 && totalCharCount < 10) {
+                    // There weren't any multibyte sequences, and there was a low density of non-ASCII single bytes.
+                    // We don't have enough data to have any confidence.
+                    // Statistical analysis of single byte non-ASCII characters would probably help here.
+                    confidence = 0;
+                }
+                else {
+                    //   ASCII or ISO file?  It's probably not our encoding,
+                    //   but is not incompatible with our encoding, so don't give it a zero.
+                    confidence = 10;
+                }
+                
+                break detectBlock;
+            }
+            
+            //
+            //  No match if there are too many characters that don't fit the encoding scheme.
+            //    (should we have zero tolerance for these?)
+            //
+            if (doubleByteCharCount < 20*badCharCount) {
+                confidence = 0;
+                break detectBlock;
+            }
+            
+            if (commonChars == null) {
+                // We have no statistics on frequently occurring characters.
+                //  Assess confidence purely on having a reasonable number of
+                //  multi-byte characters (the more the better).
+                confidence = 30 + doubleByteCharCount - 20*badCharCount;
+                if (confidence > 100) {
+                    confidence = 100;
+                }
+            }else {
+                //
+                // Frequency of occurrence statistics exist.
+                //
+                double maxVal = Math.log((float)doubleByteCharCount / 4);
+                double scaleFactor = 90.0 / maxVal;
+                confidence = (int)(Math.log(commonCharCount+1) * scaleFactor + 10);
+                confidence = Math.min(confidence, 100);
+            }
+        }   // end of detectBlock:
+        
+        return confidence;
+    }
+    
+     // "Character"  iterated character class.
+     //    Recognizers for specific mbcs encodings make their "characters" available
+     //    by providing a nextChar() function that fills in an instance of iteratedChar
+     //    with the next char from the input.
+     //    The returned characters are not converted to Unicode, but remain as the raw
+     //    bytes (concatenated into an int) from the codepage data.
+     //
+     //  For Asian charsets, use the raw input rather than the input that has been
+     //   stripped of markup.  Detection only considers multi-byte chars, effectively
+     //   stripping markup anyway, and double byte chars do occur in markup too.
+     //
+     static class iteratedChar {
+         int             charValue = 0;             // 1-4 bytes from the raw input data
+         int             index     = 0;
+         int             nextIndex = 0;
+         boolean         error     = false;
+         boolean         done      = false;
+         
+         void reset() {
+             charValue = 0;
+             index     = -1;
+             nextIndex = 0;
+             error     = false;
+             done      = false;
+         }
+         
+         int nextByte(CharsetDetector det) {
+             if (nextIndex >= det.fRawLength) {
+                 done = true;
+                 return -1;
+             }
+             int byteValue = (int)det.fRawInput[nextIndex++] & 0x00ff;
+             return byteValue;
+         }       
+     }
+     
+     /**
+      * Get the next character (however many bytes it is) from the input data
+      *    Subclasses for specific charset encodings must implement this function
+      *    to get characters according to the rules of their encoding scheme.
+      * 
+      *  This function is not a method of class iteratedChar only because
+      *   that would require a lot of extra derived classes, which is awkward.
+      * @param it  The iteratedChar "struct" into which the returned char is placed.
+      * @param det The charset detector, which is needed to get at the input byte data
+      *            being iterated over.
+      * @return    True if a character was returned, false at end of input.
+      */
+     abstract boolean nextChar(iteratedChar it, CharsetDetector det);
+     
+
+
+     
+     
+     /**
+      *   Shift-JIS charset recognizer.   
+      *
+      */
+     static class CharsetRecog_sjis extends CharsetRecog_mbcs {
+         static int [] commonChars = 
+             // TODO:  This set of data comes from the character frequency-
+             //        of-occurrence analysis tool.  The data needs to be moved
+             //        into a resource and loaded from there.
+            {0x8140, 0x8141, 0x8142, 0x8145, 0x815b, 0x8169, 0x816a, 0x8175, 0x8176, 0x82a0, 
+             0x82a2, 0x82a4, 0x82a9, 0x82aa, 0x82ab, 0x82ad, 0x82af, 0x82b1, 0x82b3, 0x82b5, 
+             0x82b7, 0x82bd, 0x82be, 0x82c1, 0x82c4, 0x82c5, 0x82c6, 0x82c8, 0x82c9, 0x82cc, 
+             0x82cd, 0x82dc, 0x82e0, 0x82e7, 0x82e8, 0x82e9, 0x82ea, 0x82f0, 0x82f1, 0x8341, 
+             0x8343, 0x834e, 0x834f, 0x8358, 0x835e, 0x8362, 0x8367, 0x8375, 0x8376, 0x8389, 
+             0x838a, 0x838b, 0x838d, 0x8393, 0x8e96, 0x93fa, 0x95aa};
+         
+         boolean nextChar(iteratedChar it, CharsetDetector det) {
+             it.index = it.nextIndex;
+             it.error = false;
+             int firstByte;
+             firstByte = it.charValue = it.nextByte(det);
+             if (firstByte < 0) {
+                 return false;
+             }
+             
+             if (firstByte <= 0x7f || (firstByte>0xa0 && firstByte<=0xdf)) {
+                 return true;
+             }
+             
+             int secondByte = it.nextByte(det);
+             if (secondByte < 0)  {
+                 return false;          
+             }
+             it.charValue = (firstByte << 8) | secondByte;
+             if (! ((secondByte>=0x40 && secondByte<=0x7f) || (secondByte>=0x80 && secondByte<=0xff))) {
+                 // Illegal second byte value.
+                 it.error = true;
+             }
+             return true;
+         }
+         
+         int match(CharsetDetector det) {
+             return match(det, commonChars);
+         }
+         
+         String getName() {
+             return "Shift_JIS";
+         }
+         
+         public String getLanguage()
+         {
+             return "ja";
+         }
+
+         
+     }
+     
+     
+     /**
+      *   Big5 charset recognizer.   
+      *
+      */
+     static class CharsetRecog_big5 extends CharsetRecog_mbcs {
+         static int [] commonChars = 
+             // TODO:  This set of data comes from the character frequency-
+             //        of-occurrence analysis tool.  The data needs to be moved
+             //        into a resource and loaded from there.
+            {0xa140, 0xa141, 0xa142, 0xa143, 0xa147, 0xa149, 0xa175, 0xa176, 0xa440, 0xa446, 
+             0xa447, 0xa448, 0xa451, 0xa454, 0xa457, 0xa464, 0xa46a, 0xa46c, 0xa477, 0xa4a3, 
+             0xa4a4, 0xa4a7, 0xa4c1, 0xa4ce, 0xa4d1, 0xa4df, 0xa4e8, 0xa4fd, 0xa540, 0xa548, 
+             0xa558, 0xa569, 0xa5cd, 0xa5e7, 0xa657, 0xa661, 0xa662, 0xa668, 0xa670, 0xa6a8, 
+             0xa6b3, 0xa6b9, 0xa6d3, 0xa6db, 0xa6e6, 0xa6f2, 0xa740, 0xa751, 0xa759, 0xa7da, 
+             0xa8a3, 0xa8a5, 0xa8ad, 0xa8d1, 0xa8d3, 0xa8e4, 0xa8fc, 0xa9c0, 0xa9d2, 0xa9f3, 
+             0xaa6b, 0xaaba, 0xaabe, 0xaacc, 0xaafc, 0xac47, 0xac4f, 0xacb0, 0xacd2, 0xad59, 
+             0xaec9, 0xafe0, 0xb0ea, 0xb16f, 0xb2b3, 0xb2c4, 0xb36f, 0xb44c, 0xb44e, 0xb54c, 
+             0xb5a5, 0xb5bd, 0xb5d0, 0xb5d8, 0xb671, 0xb7ed, 0xb867, 0xb944, 0xbad8, 0xbb44, 
+             0xbba1, 0xbdd1, 0xc2c4, 0xc3b9, 0xc440, 0xc45f};
+          
+         boolean nextChar(iteratedChar it, CharsetDetector det) {
+             it.index = it.nextIndex;
+             it.error = false;
+             int firstByte;
+             firstByte = it.charValue = it.nextByte(det);
+             if (firstByte < 0) {
+                 return false;
+             }
+             
+             if (firstByte <= 0x7f || firstByte==0xff) {
+                 // single byte character.
+                 return true;
+             }
+             
+             int secondByte = it.nextByte(det);
+             if (secondByte < 0)  {
+                 return false;          
+             }
+             it.charValue = (it.charValue << 8) | secondByte;
+
+             if (secondByte < 0x40 ||
+                 secondByte ==0x7f ||
+                 secondByte == 0xff) {
+                     it.error = true;
+             }
+             return true;
+         }
+         
+         int match(CharsetDetector det) {
+             return match(det, commonChars);
+         }
+         
+         String getName() {
+             return "Big5";
+         }
+         
+         
+         public String getLanguage()
+         {
+             return "zh";
+         }
+     }
+     
+     
    /**
     *  EUC charset recognizers.  One abstract class that provides the common
     *  logic for stepping to the next character according to the EUC encoding
     *  scheme, plus nested concrete subclasses for EUC-JP and EUC-KR.
     */
    abstract static class CharsetRecog_euc extends CharsetRecog_mbcs {
        
        /*
         *  Get the next character value for EUC based encodings.
         *  Character "value" is simply the raw bytes that make up the character
         *     packed into an int.
         *  Returns false when the input data is exhausted; sets it.error when a
         *     trail byte falls below the legal EUC trail-byte floor of 0xa1.
         */
        boolean nextChar(iteratedChar it, CharsetDetector det) {
            it.index = it.nextIndex;
            it.error = false;
            int firstByte  = 0;
            int secondByte = 0;
            int thirdByte  = 0;
            //int fourthByte = 0;
            
            buildChar: {
                firstByte = it.charValue = it.nextByte(det);                 
                if (firstByte < 0) {
                    // Ran off the end of the input data
                    it.done = true;
                    break buildChar;
                }
                if (firstByte <= 0x8d) {
                    // single byte char
                    break buildChar;
                }
                
                secondByte = it.nextByte(det);
                it.charValue = (it.charValue << 8) | secondByte;
                // NOTE(review): if the input ends here, secondByte is -1 and gets
                // packed into charValue; the "< 0xa1" checks below still flag the
                // truncated character as an error, so no separate EOF test is made.
                
                if (firstByte >= 0xA1 && firstByte <= 0xfe) {
                    // Two byte Char
                    if (secondByte < 0xa1) {
                        it.error = true;
                    }
                    break buildChar;
                }
                if (firstByte == 0x8e) {
                    // Code Set 2.
                    //   In EUC-JP, total char size is 2 bytes, only one byte of actual char value.
                    //   In EUC-TW, total char size is 4 bytes, three bytes contribute to char value.
                    // We don't know which we've got.
                    // Treat it like EUC-JP.  If the data really was EUC-TW, the following two
                    //   bytes will look like a well formed 2 byte char.  
                    if (secondByte < 0xa1) {
                        it.error = true;
                    }
                    break buildChar;                     
                }
                
                if (firstByte == 0x8f) {
                    // Code set 3.
                    // Three byte total char size, two bytes of actual char value.
                    thirdByte    = it.nextByte(det);
                    it.charValue = (it.charValue << 8) | thirdByte;
                    if (thirdByte < 0xa1) {
                        it.error = true;
                    }
                }
             }
            
            return (it.done == false);
        }
        
        /**
         * The charset recognizer for EUC-JP.  A singleton instance of this class
         *    is created and kept by the public CharsetDetector class
         */
        static class CharsetRecog_euc_jp extends CharsetRecog_euc {
            static int [] commonChars = 
                // TODO:  This set of data comes from the character frequency-
                //        of-occurence analysis tool.  The data needs to be moved
                //        into a resource and loaded from there.
               {0xa1a1, 0xa1a2, 0xa1a3, 0xa1a6, 0xa1bc, 0xa1ca, 0xa1cb, 0xa1d6, 0xa1d7, 0xa4a2, 
                0xa4a4, 0xa4a6, 0xa4a8, 0xa4aa, 0xa4ab, 0xa4ac, 0xa4ad, 0xa4af, 0xa4b1, 0xa4b3, 
                0xa4b5, 0xa4b7, 0xa4b9, 0xa4bb, 0xa4bd, 0xa4bf, 0xa4c0, 0xa4c1, 0xa4c3, 0xa4c4, 
                0xa4c6, 0xa4c7, 0xa4c8, 0xa4c9, 0xa4ca, 0xa4cb, 0xa4ce, 0xa4cf, 0xa4d0, 0xa4de, 
                0xa4df, 0xa4e1, 0xa4e2, 0xa4e4, 0xa4e8, 0xa4e9, 0xa4ea, 0xa4eb, 0xa4ec, 0xa4ef, 
                0xa4f2, 0xa4f3, 0xa5a2, 0xa5a3, 0xa5a4, 0xa5a6, 0xa5a7, 0xa5aa, 0xa5ad, 0xa5af, 
                0xa5b0, 0xa5b3, 0xa5b5, 0xa5b7, 0xa5b8, 0xa5b9, 0xa5bf, 0xa5c3, 0xa5c6, 0xa5c7, 
                0xa5c8, 0xa5c9, 0xa5cb, 0xa5d0, 0xa5d5, 0xa5d6, 0xa5d7, 0xa5de, 0xa5e0, 0xa5e1, 
                0xa5e5, 0xa5e9, 0xa5ea, 0xa5eb, 0xa5ec, 0xa5ed, 0xa5f3, 0xb8a9, 0xb9d4, 0xbaee, 
                0xbbc8, 0xbef0, 0xbfb7, 0xc4ea, 0xc6fc, 0xc7bd, 0xcab8, 0xcaf3, 0xcbdc, 0xcdd1};             
            String getName() {
                return "EUC-JP";
            }
            
            int match(CharsetDetector det) {
                return match(det, commonChars);
            }
            
            public String getLanguage()
            {
                return "ja";
            }
        }
        
        /**
         * The charset recognizer for EUC-KR.  A singleton instance of this class
         *    is created and kept by the public CharsetDetector class
         */
        static class CharsetRecog_euc_kr extends CharsetRecog_euc {
            static int [] commonChars = 
                // TODO:  This set of data comes from the character frequency-
                //        of-occurence analysis tool.  The data needs to be moved
                //        into a resource and loaded from there.
               {0xb0a1, 0xb0b3, 0xb0c5, 0xb0cd, 0xb0d4, 0xb0e6, 0xb0ed, 0xb0f8, 0xb0fa, 0xb0fc, 
                0xb1b8, 0xb1b9, 0xb1c7, 0xb1d7, 0xb1e2, 0xb3aa, 0xb3bb, 0xb4c2, 0xb4cf, 0xb4d9, 
                0xb4eb, 0xb5a5, 0xb5b5, 0xb5bf, 0xb5c7, 0xb5e9, 0xb6f3, 0xb7af, 0xb7c2, 0xb7ce, 
                0xb8a6, 0xb8ae, 0xb8b6, 0xb8b8, 0xb8bb, 0xb8e9, 0xb9ab, 0xb9ae, 0xb9cc, 0xb9ce, 
                0xb9fd, 0xbab8, 0xbace, 0xbad0, 0xbaf1, 0xbbe7, 0xbbf3, 0xbbfd, 0xbcad, 0xbcba, 
                0xbcd2, 0xbcf6, 0xbdba, 0xbdc0, 0xbdc3, 0xbdc5, 0xbec6, 0xbec8, 0xbedf, 0xbeee, 
                0xbef8, 0xbefa, 0xbfa1, 0xbfa9, 0xbfc0, 0xbfe4, 0xbfeb, 0xbfec, 0xbff8, 0xc0a7, 
                0xc0af, 0xc0b8, 0xc0ba, 0xc0bb, 0xc0bd, 0xc0c7, 0xc0cc, 0xc0ce, 0xc0cf, 0xc0d6, 
                0xc0da, 0xc0e5, 0xc0fb, 0xc0fc, 0xc1a4, 0xc1a6, 0xc1b6, 0xc1d6, 0xc1df, 0xc1f6, 
                0xc1f8, 0xc4a1, 0xc5cd, 0xc6ae, 0xc7cf, 0xc7d1, 0xc7d2, 0xc7d8, 0xc7e5, 0xc8ad};
            
            String getName() {
                return "EUC-KR";
            }
            
            int match(CharsetDetector det) {
                return match(det, commonChars);
            }
            
            public String getLanguage()
            {
                return "ko";
            }
        }
    }
+     
+     /**
+      * 
+      *   GB-18030 recognizer. Uses simplified Chinese statistics.   
+      *
+      */
+     static class CharsetRecog_gb_18030 extends CharsetRecog_mbcs {
+         
+         /*
+          *  (non-Javadoc)
+          *  Get the next character value for EUC based encodings.
+          *  Character "value" is simply the raw bytes that make up the character
+          *     packed into an int.
+          */
+         boolean nextChar(iteratedChar it, CharsetDetector det) {
+             it.index = it.nextIndex;
+             it.error = false;
+             int firstByte  = 0;
+             int secondByte = 0;
+             int thirdByte  = 0;
+             int fourthByte = 0;
+             
+             buildChar: {
+                 firstByte = it.charValue = it.nextByte(det); 
+                 
+                 if (firstByte < 0) {
+                     // Ran off the end of the input data
+                     it.done = true;
+                     break buildChar;
+                 }
+                 
+                 if (firstByte <= 0x80) {
+                     // single byte char
+                     break buildChar;
+                 }
+                 
+                 secondByte = it.nextByte(det);
+                 it.charValue = (it.charValue << 8) | secondByte;
+                 
+                 if (firstByte >= 0x81 && firstByte <= 0xFE) {
+                     // Two byte Char
+                     if ((secondByte >= 0x40 && secondByte <= 0x7E) || (secondByte >=80 && secondByte <=0xFE)) {
+                         break buildChar;
+                     }
+                     
+                     // Four byte char
+                     if (secondByte >= 0x30 && secondByte <= 0x39) {
+                         thirdByte = it.nextByte(det);
+                         
+                         if (thirdByte >= 0x81 && thirdByte <= 0xFE) {
+                             fourthByte = it.nextByte(det);
+                             
+                             if (fourthByte >= 0x30 && fourthByte <= 0x39) {
+                                 it.charValue = (it.charValue << 16) | (thirdByte << 8) | fourthByte;
+                                 break buildChar;
+                             }
+                         }
+                     }
+                     
+                     it.error = true;
+                     break buildChar;
+                 }
+             }
+                 
+             return (it.done == false);
+         }
+         
+         static int [] commonChars = 
+             // TODO:  This set of data comes from the character frequency-
+             //        of-occurence analysis tool.  The data needs to be moved
+             //        into a resource and loaded from there.
+            {0xa1a1, 0xa1a2, 0xa1a3, 0xa1a4, 0xa1b0, 0xa1b1, 0xa1f1, 0xa1f3, 0xa3a1, 0xa3ac, 
+             0xa3ba, 0xb1a8, 0xb1b8, 0xb1be, 0xb2bb, 0xb3c9, 0xb3f6, 0xb4f3, 0xb5bd, 0xb5c4, 
+             0xb5e3, 0xb6af, 0xb6d4, 0xb6e0, 0xb7a2, 0xb7a8, 0xb7bd, 0xb7d6, 0xb7dd, 0xb8b4, 
+             0xb8df, 0xb8f6, 0xb9ab, 0xb9c9, 0xb9d8, 0xb9fa, 0xb9fd, 0xbacd, 0xbba7, 0xbbd6, 
+             0xbbe1, 0xbbfa, 0xbcbc, 0xbcdb, 0xbcfe, 0xbdcc, 0xbecd, 0xbedd, 0xbfb4, 0xbfc6, 
+             0xbfc9, 0xc0b4, 0xc0ed, 0xc1cb, 0xc2db, 0xc3c7, 0xc4dc, 0xc4ea, 0xc5cc, 0xc6f7, 
+             0xc7f8, 0xc8ab, 0xc8cb, 0xc8d5, 0xc8e7, 0xc9cf, 0xc9fa, 0xcab1, 0xcab5, 0xcac7, 
+             0xcad0, 0xcad6, 0xcaf5, 0xcafd, 0xccec, 0xcdf8, 0xceaa, 0xcec4, 0xced2, 0xcee5, 
+             0xcfb5, 0xcfc2, 0xcfd6, 0xd0c2, 0xd0c5, 0xd0d0, 0xd0d4, 0xd1a7, 0xd2aa, 0xd2b2, 
+             0xd2b5, 0xd2bb, 0xd2d4, 0xd3c3, 0xd3d0, 0xd3fd, 0xd4c2, 0xd4da, 0xd5e2, 0xd6d0};
+
+         
+         String getName() {
+             return "GB18030";
+         }
+         
+         int match(CharsetDetector det) {
+             return match(det, commonChars);
+         }
+         
+         public String getLanguage()
+         {
+             return "zh";
+         }
+     }
+     
+     
+}