You are viewing a plain text version of this content. The canonical link for it is here.
Posted to solr-commits@lucene.apache.org by no...@apache.org on 2009/12/11 14:29:18 UTC

svn commit: r889613 - in /lucene/solr/trunk/contrib/dataimporthandler: ./ src/main/java/org/apache/solr/handler/dataimport/ src/test/java/org/apache/solr/handler/dataimport/ src/test/resources/solr/conf/

Author: noble
Date: Fri Dec 11 13:29:17 2009
New Revision: 889613

URL: http://svn.apache.org/viewvc?rev=889613&view=rev
Log:
SOLR-1358 Integration of Tika and DataImportHandler

Added:
    lucene/solr/trunk/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/TikaEntityProcessor.java   (with props)
    lucene/solr/trunk/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestTikaEntityProcessor.java
    lucene/solr/trunk/contrib/dataimporthandler/src/test/resources/solr/conf/dataimport-schema-no-unique-key.xml
Modified:
    lucene/solr/trunk/contrib/dataimporthandler/CHANGES.txt

Modified: lucene/solr/trunk/contrib/dataimporthandler/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/solr/trunk/contrib/dataimporthandler/CHANGES.txt?rev=889613&r1=889612&r2=889613&view=diff
==============================================================================
--- lucene/solr/trunk/contrib/dataimporthandler/CHANGES.txt (original)
+++ lucene/solr/trunk/contrib/dataimporthandler/CHANGES.txt Fri Dec 11 13:29:17 2009
@@ -21,13 +21,15 @@
 New Features
 ----------------------
 
-* SOLR-1525 allow DIH to refer to core properties (noble)
+* SOLR-1525 : allow DIH to refer to core properties (noble)
 
-* SOLR-1547 TemplateTransformer copy objects more intelligently when there when the template is a single variable (noble)
+* SOLR-1547 : TemplateTransformer copies objects more intelligently when the template is a single variable (noble)
 
-* SOLR-1627 VariableResolver should be fetched just in time (noble)
+* SOLR-1627 : VariableResolver should be fetched just in time (noble)
 
-* SOLR-1583 Create DataSources that return InputStream (noble)
+* SOLR-1583 : Create DataSources that return InputStream (noble)
+
+* SOLR-1358 : Integration of Tika and DataImportHandler ( Akshay Ukey, noble)
 
 
 Optimizations

Added: lucene/solr/trunk/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/TikaEntityProcessor.java
URL: http://svn.apache.org/viewvc/lucene/solr/trunk/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/TikaEntityProcessor.java?rev=889613&view=auto
==============================================================================
--- lucene/solr/trunk/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/TikaEntityProcessor.java (added)
+++ lucene/solr/trunk/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/TikaEntityProcessor.java Fri Dec 11 13:29:17 2009
@@ -0,0 +1,198 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.handler.dataimport;
+
+import org.apache.commons.io.IOUtils;
+import static org.apache.solr.handler.dataimport.DataImportHandlerException.SEVERE;
+import static org.apache.solr.handler.dataimport.DataImportHandlerException.wrapAndThrow;
+import static org.apache.solr.handler.dataimport.DataImporter.COLUMN;
+import static org.apache.solr.handler.dataimport.XPathEntityProcessor.URL;
+import org.apache.tika.config.TikaConfig;
+import org.apache.tika.metadata.Metadata;
+import org.apache.tika.parser.AutoDetectParser;
+import org.apache.tika.parser.Parser;
+import org.apache.tika.parser.ParseContext;
+import org.apache.tika.sax.BodyContentHandler;
+import org.apache.tika.sax.ContentHandlerDecorator;
+import org.apache.tika.sax.XHTMLContentHandler;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.xml.sax.Attributes;
+import org.xml.sax.ContentHandler;
+import org.xml.sax.SAXException;
+import org.xml.sax.helpers.DefaultHandler;
+
+import javax.xml.transform.OutputKeys;
+import javax.xml.transform.TransformerConfigurationException;
+import javax.xml.transform.sax.SAXTransformerFactory;
+import javax.xml.transform.sax.TransformerHandler;
+import javax.xml.transform.stream.StreamResult;
+import java.io.File;
+import java.io.InputStream;
+import java.io.StringWriter;
+import java.io.Writer;
+import java.util.HashMap;
+import java.util.Map;
+/**
+ * <p>An implementation of EntityProcessor which reads data from rich documents
+ * (PDF, Office formats, HTML, etc.) using Apache Tika. The extracted body is
+ * exposed in the "text" column (unless format="none") and document metadata
+ * can be mapped to columns via fields declared with meta="true".</p>
+ *
+ * @version $Id$
+ * @since solr 1.5
+ */
+public class TikaEntityProcessor extends EntityProcessorBase {
+  private static final Logger LOG = LoggerFactory.getLogger(TikaEntityProcessor.class);
+
+  /** Fully qualified class name of Tika's auto-detecting parser (the default). */
+  static final String AUTO_PARSER = "org.apache.tika.parser.AutoDetectParser";
+
+  private TikaConfig tikaConfig;
+  /** Output format for extracted content: "text" (default), "html", "xml" or "none". */
+  private String format = "text";
+  /** A Tika entity yields exactly one row; set once that row has been emitted. */
+  private boolean done = false;
+  /** Class name of the Tika Parser implementation to instantiate. */
+  private String parser;
+
+  @Override
+  protected void firstInit(Context context) {
+    // Load a custom Tika configuration if one is given; otherwise use Tika's
+    // built-in defaults. Relative paths resolve against the Solr conf dir.
+    String tikaConfigFile = context.getResolvedEntityAttribute("tikaConfig");
+    if (tikaConfigFile == null) {
+      tikaConfig = TikaConfig.getDefaultConfig();
+    } else {
+      File configFile = new File(tikaConfigFile);
+      if (!configFile.isAbsolute()) {
+        configFile = new File(context.getSolrCore().getResourceLoader().getConfigDir(), tikaConfigFile);
+      }
+      try {
+        tikaConfig = new TikaConfig(configFile);
+      } catch (Exception e) {
+        wrapAndThrow(SEVERE, e, "Unable to load Tika Config");
+      }
+    }
+
+    format = context.getResolvedEntityAttribute("format");
+    if (format == null)
+      format = "text";
+    if (!"html".equals(format) && !"xml".equals(format) && !"text".equals(format) && !"none".equals(format))
+      throw new DataImportHandlerException(SEVERE, "'format' can be one of text|html|xml|none");
+    parser = context.getResolvedEntityAttribute("parser");
+    if (parser == null) {
+      parser = AUTO_PARSER;
+    }
+    done = false;
+  }
+
+  /**
+   * Builds and returns the single row for this entity: the extracted content
+   * plus any metadata fields marked meta="true". Subsequent calls return null.
+   * Returns null (without aborting) on a parse failure when onError != ABORT.
+   */
+  public Map<String, Object> nextRow() {
+    if (done) return null;
+    Map<String, Object> row = new HashMap<String, Object>();
+    ContentHandler contentHandler = null;
+    Metadata metadata = new Metadata();
+    StringWriter sw = new StringWriter();
+    try {
+      if ("html".equals(format)) {
+        contentHandler = getHtmlHandler(sw);
+      } else if ("xml".equals(format)) {
+        contentHandler = getXmlContentHandler(sw);
+      } else if ("text".equals(format)) {
+        contentHandler = getTextContentHandler(sw);
+      } else if ("none".equals(format)) {
+        // Content is discarded; only metadata columns are populated.
+        contentHandler = new DefaultHandler();
+      }
+    } catch (TransformerConfigurationException e) {
+      wrapAndThrow(SEVERE, e, "Unable to create content handler");
+    }
+    Parser tikaParser = null;
+    if (parser.equals(AUTO_PARSER)) {
+      // Distinct local name: the original shadowed the 'parser' field here.
+      AutoDetectParser autoParser = new AutoDetectParser();
+      autoParser.setConfig(tikaConfig);
+      tikaParser = autoParser;
+    } else {
+      tikaParser = (Parser) context.getSolrCore().getResourceLoader().newInstance(parser);
+    }
+    // Open the stream only after all setup that can fail, and close it in a
+    // finally block: the original leaked the stream whenever parsing threw
+    // (both in the ABORT path and in the return-null path).
+    DataSource<InputStream> dataSource = context.getDataSource();
+    InputStream is = dataSource.getData(context.getResolvedEntityAttribute(URL));
+    try {
+      tikaParser.parse(is, contentHandler, metadata, new ParseContext());
+    } catch (Exception e) {
+      if (ABORT.equals(onError)) {
+        wrapAndThrow(SEVERE, e, "Unable to read content");
+      } else {
+        LOG.warn("Unable to parse document "+ context.getResolvedEntityAttribute(URL) ,e);
+        return null;
+      }
+    } finally {
+      IOUtils.closeQuietly(is);
+    }
+    // Copy requested Tika metadata values into the row.
+    for (Map<String, String> field : context.getAllEntityFields()) {
+      if (!"true".equals(field.get("meta"))) continue;
+      String col = field.get(COLUMN);
+      String s = metadata.get(col);
+      if (s != null) row.put(col, s);
+    }
+    if (!"none".equals(format)) row.put("text", sw.toString());
+    done = true;
+    return row;
+  }
+
+  /**
+   * Returns a handler that serializes the parsed document as HTML into the
+   * given writer, dropping the XHTML namespace and the head element so that
+   * only body markup is emitted.
+   */
+  private static ContentHandler getHtmlHandler(Writer writer)
+          throws TransformerConfigurationException {
+    SAXTransformerFactory factory = (SAXTransformerFactory)
+            SAXTransformerFactory.newInstance();
+    TransformerHandler handler = factory.newTransformerHandler();
+    handler.getTransformer().setOutputProperty(OutputKeys.METHOD, "html");
+    handler.setResult(new StreamResult(writer));
+    return new ContentHandlerDecorator(handler) {
+      @Override
+      public void startElement(
+              String uri, String localName, String name, Attributes atts)
+              throws SAXException {
+        if (XHTMLContentHandler.XHTML.equals(uri)) {
+          uri = null;
+        }
+        if (!"head".equals(localName)) {
+          super.startElement(uri, localName, name, atts);
+        }
+      }
+
+      @Override
+      public void endElement(String uri, String localName, String name)
+              throws SAXException {
+        if (XHTMLContentHandler.XHTML.equals(uri)) {
+          uri = null;
+        }
+        if (!"head".equals(localName)) {
+          super.endElement(uri, localName, name);
+        }
+      }
+
+      @Override
+      public void startPrefixMapping(String prefix, String uri) {/*no op*/ }
+
+      @Override
+      public void endPrefixMapping(String prefix) {/*no op*/ }
+    };
+  }
+
+  /** Returns a handler that writes only the plain-text body content. */
+  private static ContentHandler getTextContentHandler(Writer writer) {
+    return new BodyContentHandler(writer);
+  }
+
+  /** Returns a handler that serializes the parsed document as XML. */
+  private static ContentHandler getXmlContentHandler(Writer writer)
+          throws TransformerConfigurationException {
+    SAXTransformerFactory factory = (SAXTransformerFactory)
+            SAXTransformerFactory.newInstance();
+    TransformerHandler handler = factory.newTransformerHandler();
+    handler.getTransformer().setOutputProperty(OutputKeys.METHOD, "xml");
+    handler.setResult(new StreamResult(writer));
+    return handler;
+  }
+
+}

Propchange: lucene/solr/trunk/contrib/dataimporthandler/src/main/java/org/apache/solr/handler/dataimport/TikaEntityProcessor.java
------------------------------------------------------------------------------
    svn:keywords = Id

Added: lucene/solr/trunk/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestTikaEntityProcessor.java
URL: http://svn.apache.org/viewvc/lucene/solr/trunk/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestTikaEntityProcessor.java?rev=889613&view=auto
==============================================================================
--- lucene/solr/trunk/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestTikaEntityProcessor.java (added)
+++ lucene/solr/trunk/contrib/dataimporthandler/src/test/java/org/apache/solr/handler/dataimport/TestTikaEntityProcessor.java Fri Dec 11 13:29:17 2009
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.handler.dataimport;
+
+import org.junit.After;
+import org.junit.Before;
+
+/**
+ * Testcase for TikaEntityProcessor: runs a full DIH import over a sample PDF
+ * via BinFileDataSource and verifies one document is indexed.
+ * @version $Id$
+ * @since solr 1.5 
+ */
+public class TestTikaEntityProcessor extends AbstractDataImportHandlerTest {
+
+  // NOTE(review): JUnit 4 annotations on setUp/tearDown that also call super;
+  // presumably the base class follows the JUnit 3 lifecycle — confirm the
+  // runner in use, as @Before/@After would be ignored under a JUnit 3 runner.
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    super.tearDown();
+  }
+
+  // Schema without a uniqueKey so the extracted document needs no id field.
+  public String getSchemaFile() {
+    return "dataimport-schema-no-unique-key.xml";
+  }
+
+  public String getSolrConfigFile() {
+    return "dataimport-solrconfig.xml";
+  }
+
+  public void testIndexingWithTikaEntityProcessor() throws Exception {
+    // Inline data-config: Tika entity reading a test PDF from the extraction
+    // contrib; "Author"/"title" come from Tika metadata (meta="true"),
+    // "text" holds the extracted body.
+    String conf =
+            "<dataConfig>" +
+                    "  <dataSource name=\"binary\" type=\"BinFileDataSource\"/>" +
+                    "  <document>" +
+                    "    <entity processor=\"TikaEntityProcessor\" url=\"../../../../extraction/src/test/resources/solr-word.pdf\" dataSource=\"binary\">" +
+                    "      <field column=\"Author\" meta=\"true\" name=\"author\"/>" +
+                    "      <field column=\"title\" meta=\"true\" name=\"docTitle\"/>" +
+                    "      <field column=\"text\"/>" +
+                    "     </entity>" +
+                    "  </document>" +
+                    "</dataConfig>";
+    super.runFullImport(conf);
+    // Exactly one document should be indexed from the single PDF.
+    assertQ(req("*:*"), "//*[@numFound='1']");
+  }
+}

Added: lucene/solr/trunk/contrib/dataimporthandler/src/test/resources/solr/conf/dataimport-schema-no-unique-key.xml
URL: http://svn.apache.org/viewvc/lucene/solr/trunk/contrib/dataimporthandler/src/test/resources/solr/conf/dataimport-schema-no-unique-key.xml?rev=889613&view=auto
==============================================================================
--- lucene/solr/trunk/contrib/dataimporthandler/src/test/resources/solr/conf/dataimport-schema-no-unique-key.xml (added)
+++ lucene/solr/trunk/contrib/dataimporthandler/src/test/resources/solr/conf/dataimport-schema-no-unique-key.xml Fri Dec 11 13:29:17 2009
@@ -0,0 +1,203 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!--  
+ This is the Solr schema file. This file should be named "schema.xml" and
+ should be in the conf directory under the solr home
+ (i.e. ./solr/conf/schema.xml by default) 
+ or located where the classloader for the Solr webapp can find it.
+
+ This example schema is the recommended starting point for users.
+ It should be kept correct and concise, usable out-of-the-box.
+
+ For more information on how to customize this file, please see
+ http://wiki.apache.org/solr/SchemaXml
+-->
+
+<schema name="test" version="1.2">
+  <!-- attribute "name" is the name of this schema and is only used for display purposes.
+       Applications should change this to reflect the nature of the search collection.
+       version="1.2" is Solr's version number for the schema syntax and semantics.  It should
+       not normally be changed by applications.
+       1.0: multiValued attribute did not exist, all fields are multiValued by nature
+       1.1: multiValued attribute introduced, false by default -->
+
+  <types>
+    <!-- field type definitions. The "name" attribute is
+       just a label to be used by field definitions.  The "class"
+       attribute and any other attributes determine the real
+       behavior of the fieldType.
+         Class names starting with "solr" refer to java classes in the
+       org.apache.solr.analysis package.
+    -->
+
+    <!-- The StrField type is not analyzed, but indexed/stored verbatim.  
+       - StrField and TextField support an optional compressThreshold which
+       limits compression (if enabled in the derived fields) to values which
+       exceed a certain size (in characters).
+    -->
+    <fieldType name="string" class="solr.StrField" sortMissingLast="true" omitNorms="true"/>
+
+    <!-- boolean type: "true" or "false" -->
+    <fieldType name="boolean" class="solr.BoolField" sortMissingLast="true" omitNorms="true"/>
+
+    <!-- The optional sortMissingLast and sortMissingFirst attributes are
+         currently supported on types that are sorted internally as strings.
+       - If sortMissingLast="true", then a sort on this field will cause documents
+         without the field to come after documents with the field,
+         regardless of the requested sort order (asc or desc).
+       - If sortMissingFirst="true", then a sort on this field will cause documents
+         without the field to come before documents with the field,
+         regardless of the requested sort order.
+       - If sortMissingLast="false" and sortMissingFirst="false" (the default),
+         then default lucene sorting will be used which places docs without the
+         field first in an ascending sort and last in a descending sort.
+    -->    
+
+
+    <!-- numeric field types that store and index the text
+         value verbatim (and hence don't support range queries, since the
+         lexicographic ordering isn't equal to the numeric ordering) -->
+    <fieldType name="integer" class="solr.IntField" omitNorms="true"/>
+    <fieldType name="long" class="solr.LongField" omitNorms="true"/>
+    <fieldType name="float" class="solr.FloatField" omitNorms="true"/>
+    <fieldType name="double" class="solr.DoubleField" omitNorms="true"/>
+
+
+    <!-- Numeric field types that manipulate the value into
+         a string value that isn't human-readable in its internal form,
+         but with a lexicographic ordering the same as the numeric ordering,
+         so that range queries work correctly. -->
+    <fieldType name="sint" class="solr.SortableIntField" sortMissingLast="true" omitNorms="true"/>
+    <fieldType name="slong" class="solr.SortableLongField" sortMissingLast="true" omitNorms="true"/>
+    <fieldType name="sfloat" class="solr.SortableFloatField" sortMissingLast="true" omitNorms="true"/>
+    <fieldType name="sdouble" class="solr.SortableDoubleField" sortMissingLast="true" omitNorms="true"/>
+
+
+    <!-- The format for this date field is of the form 1995-12-31T23:59:59Z, and
+         is a more restricted form of the canonical representation of dateTime
+         http://www.w3.org/TR/xmlschema-2/#dateTime    
+         The trailing "Z" designates UTC time and is mandatory.
+         Optional fractional seconds are allowed: 1995-12-31T23:59:59.999Z
+         All other components are mandatory.
+
+         Expressions can also be used to denote calculations that should be
+         performed relative to "NOW" to determine the value, ie...
+
+               NOW/HOUR
+                  ... Round to the start of the current hour
+               NOW-1DAY
+                  ... Exactly 1 day prior to now
+               NOW/DAY+6MONTHS+3DAYS
+                  ... 6 months and 3 days in the future from the start of
+                      the current day
+                      
+         Consult the DateField javadocs for more information.
+      -->
+    <fieldType name="date" class="solr.DateField" sortMissingLast="true" omitNorms="true"/>
+
+
+    <!-- The "RandomSortField" is not used to store or search any
+         data.  You can declare fields of this type it in your schema
+         to generate pseudo-random orderings of your docs for sorting 
+         purposes.  The ordering is generated based on the field name 
+         and the version of the index, As long as the index version
+         remains unchanged, and the same field name is reused,
+         the ordering of the docs will be consistent.  
+         If you want different pseudo-random orderings of documents,
+         for the same version of the index, use a dynamicField and
+         change the name
+     -->
+    <fieldType name="random" class="solr.RandomSortField" indexed="true" />
+
+    <!-- solr.TextField allows the specification of custom text analyzers
+         specified as a tokenizer and a list of token filters. Different
+         analyzers may be specified for indexing and querying.
+
+         The optional positionIncrementGap puts space between multiple fields of
+         this type on the same document, with the purpose of preventing false phrase
+         matching across fields.
+
+         For more info on customizing your analyzer chain, please see
+         http://wiki.apache.org/solr/AnalyzersTokenizersTokenFilters
+     -->
+
+    <!-- One can also specify an existing Analyzer class that has a
+         default constructor via the class attribute on the analyzer element
+    <fieldType name="text_greek" class="solr.TextField">
+      <analyzer class="org.apache.lucene.analysis.el.GreekAnalyzer"/>
+    </fieldType>
+    -->
+
+    <!-- A text field that only splits on whitespace for exact matching of words -->
+    <fieldType name="text_ws" class="solr.TextField" positionIncrementGap="100">
+      <analyzer>
+        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+      </analyzer>
+    </fieldType>
+
+    <!-- A text field that uses WordDelimiterFilter to enable splitting and matching of
+        words on case-change, alpha numeric boundaries, and non-alphanumeric chars,
+        so that a query of "wifi" or "wi fi" could match a document containing "Wi-Fi".
+        Synonyms and stopwords are customized by external files, and stemming is enabled.
+        Duplicate tokens at the same position (which may result from Stemmed Synonyms or
+        WordDelim parts) are removed.
+        -->
+    <fieldType name="text" class="solr.TextField" positionIncrementGap="100">
+      <analyzer type="index">
+        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+        <!-- in this example, we will only use synonyms at query time
+        <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        -->
+        <!--<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
+        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <!--<filter class="solr.EnglishPorterFilterFactory" protected="protwords.txt"/>-->
+        <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+      </analyzer>
+      <analyzer type="query">
+        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+        <!--<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>-->
+        <!--<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
+        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <!--<filter class="solr.EnglishPorterFilterFactory" protected="protwords.txt"/>-->
+        <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+      </analyzer>
+    </fieldType>
+    <!-- since fields of this type are by default not stored or indexed, any data added to 
+         them will be ignored outright 
+     --> 
+    <fieldtype name="ignored" stored="false" indexed="false" class="solr.StrField" /> 
+
+ </types>
+
+
+ <fields>
+   <field name="title" type="string" indexed="true" stored="true"/>
+   <field name="author" type="string" indexed="true" stored="true" />
+   <field name="text" type="text" indexed="true" stored="true" />
+   
+ </fields>
+ <!-- field for the QueryParser to use when an explicit fieldname is absent -->
+ <defaultSearchField>text</defaultSearchField>
+
+ <!-- SolrQueryParser configuration: defaultOperator="AND|OR" -->
+ <solrQueryParser defaultOperator="OR"/>
+
+</schema>