Posted to commits@stanbol.apache.org by rw...@apache.org on 2013/03/20 10:50:11 UTC

svn commit: r1458703 [2/4] - in /stanbol/trunk: ./ commons/ commons/frameworkfragment/ commons/solr/core/ commons/solr/core/src/main/java/org/apache/stanbol/commons/solr/ commons/solr/core/src/main/java/org/apache/stanbol/commons/solr/utils/ commons/so...

Modified: stanbol/trunk/enhancement-engines/pom.xml
URL: http://svn.apache.org/viewvc/stanbol/trunk/enhancement-engines/pom.xml?rev=1458703&r1=1458702&r2=1458703&view=diff
==============================================================================
--- stanbol/trunk/enhancement-engines/pom.xml (original)
+++ stanbol/trunk/enhancement-engines/pom.xml Wed Mar 20 09:50:09 2013
@@ -64,6 +64,9 @@
     <module>paoding-token</module> <!-- tokenizing -->
     <module>nlp2rdf</module> <!-- converts AnalyzedText ContentPart to RDF -->
 
+    <!-- Japanese NLP processing -->
+    <module>kuromoji-nlp</module>
+
     <!-- RESTful NLP analyser service engine-->
     <module>restful-nlp</module> <!-- see STANBOL-893 -->
     <module>restful-langident</module> <!-- see STANBOL-895 -->

Modified: stanbol/trunk/enhancement-engines/sentiment-wordclassifier/pom.xml
URL: http://svn.apache.org/viewvc/stanbol/trunk/enhancement-engines/sentiment-wordclassifier/pom.xml?rev=1458703&r1=1458702&r2=1458703&view=diff
==============================================================================
--- stanbol/trunk/enhancement-engines/sentiment-wordclassifier/pom.xml (original)
+++ stanbol/trunk/enhancement-engines/sentiment-wordclassifier/pom.xml Wed Mar 20 09:50:09 2013
@@ -108,11 +108,15 @@
       <artifactId>org.apache.stanbol.enhancer.nlp</artifactId>
       <version>0.10.0</version>
     </dependency>
-    <dependency>
+    <dependency> <!-- for tracking and loading sentiment wordlists -->
       <groupId>org.apache.stanbol</groupId>
-      <artifactId>org.apache.stanbol.commons.solr.core</artifactId>
+      <artifactId>org.apache.stanbol.commons.stanboltools.datafileprovider</artifactId>
       <version>0.11.0</version>
     </dependency>
+    <dependency><!-- for stemming English words -->
+      <groupId>org.apache.lucene</groupId>
+      <artifactId>lucene-analyzers-common</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.apache.felix</groupId>
       <artifactId>org.apache.felix.scr.annotations</artifactId>

Modified: stanbol/trunk/enhancement-engines/sentiment-wordclassifier/src/main/java/org/apache/stanbol/enhancer/engines/sentiment/classifiers/SentiWordNet.java
URL: http://svn.apache.org/viewvc/stanbol/trunk/enhancement-engines/sentiment-wordclassifier/src/main/java/org/apache/stanbol/enhancer/engines/sentiment/classifiers/SentiWordNet.java?rev=1458703&r1=1458702&r2=1458703&view=diff
==============================================================================
--- stanbol/trunk/enhancement-engines/sentiment-wordclassifier/src/main/java/org/apache/stanbol/enhancer/engines/sentiment/classifiers/SentiWordNet.java (original)
+++ stanbol/trunk/enhancement-engines/sentiment-wordclassifier/src/main/java/org/apache/stanbol/enhancer/engines/sentiment/classifiers/SentiWordNet.java Wed Mar 20 09:50:09 2013
@@ -167,7 +167,7 @@ public class SentiWordNet {
         private ReadWriteLock lock = new ReentrantReadWriteLock();
         private Map<String,Double> wordMap = new TreeMap<String,Double>();
 
-        private EnglishMinimalStemmer stemmer = new EnglishMinimalStemmer();
+        private org.apache.lucene.analysis.en.EnglishMinimalStemmer stemmer = new EnglishMinimalStemmer();
 
         protected SentiWordNetClassifierEN() {}
 
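Note: EnglishMinimalStemmer comes from the lucene-analyzers-common artifact that the sentiment-wordclassifier POM now declares (see the pom.xml hunk above). A minimal sketch of how such an in-place stemmer is typically used, assuming stock Lucene 4.x APIs; the stemKey() helper below is illustrative and not part of this commit:

    import org.apache.lucene.analysis.en.EnglishMinimalStemmer;

    public class StemmerSketch {
        private final EnglishMinimalStemmer stemmer = new EnglishMinimalStemmer();

        /** Illustrative helper: reduces an English word to the stem used as a map key. */
        public String stemKey(String word) {
            char[] chars = word.toCharArray();
            int length = stemmer.stem(chars, chars.length); // stems in place, returns the new length
            return new String(chars, 0, length);
        }
    }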

Modified: stanbol/trunk/enhancement-engines/smartcn-token/pom.xml
URL: http://svn.apache.org/viewvc/stanbol/trunk/enhancement-engines/smartcn-token/pom.xml?rev=1458703&r1=1458702&r2=1458703&view=diff
==============================================================================
--- stanbol/trunk/enhancement-engines/smartcn-token/pom.xml (original)
+++ stanbol/trunk/enhancement-engines/smartcn-token/pom.xml Wed Mar 20 09:50:09 2013
@@ -86,7 +86,7 @@
     <dependency>
       <groupId>org.apache.stanbol</groupId>
       <artifactId>org.apache.stanbol.commons.solr.extras.smartcn</artifactId>
-      <version>0.11.0</version>
+      <version>0.12.0-SNAPSHOT</version>
     </dependency>
     <dependency>
       <groupId>org.apache.stanbol</groupId>

Modified: stanbol/trunk/enhancement-engines/smartcn-token/src/main/java/org/apache/stanbol/enhancer/engines/smartcn/impl/SmartcnSentenceEngine.java
URL: http://svn.apache.org/viewvc/stanbol/trunk/enhancement-engines/smartcn-token/src/main/java/org/apache/stanbol/enhancer/engines/smartcn/impl/SmartcnSentenceEngine.java?rev=1458703&r1=1458702&r2=1458703&view=diff
==============================================================================
--- stanbol/trunk/enhancement-engines/smartcn-token/src/main/java/org/apache/stanbol/enhancer/engines/smartcn/impl/SmartcnSentenceEngine.java (original)
+++ stanbol/trunk/enhancement-engines/smartcn-token/src/main/java/org/apache/stanbol/enhancer/engines/smartcn/impl/SmartcnSentenceEngine.java Wed Mar 20 09:50:09 2013
@@ -20,6 +20,7 @@ import static org.apache.stanbol.enhance
 import static org.apache.stanbol.enhancer.nlp.utils.NlpEngineHelper.initAnalysedText;
 
 import java.io.IOException;
+import java.io.StringReader;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -152,6 +153,7 @@ public class SmartcnSentenceEngine exten
         //first the sentences
         TokenStream sentences = new SentenceTokenizer(new CharSequenceReader(at.getText()));
         try {
+        	sentences.reset();
             while(sentences.incrementToken()){
                 OffsetAttribute offset = sentences.addAttribute(OffsetAttribute.class);
                 Sentence s = at.addSentence(offset.startOffset(), offset.endOffset());
@@ -203,6 +205,7 @@ public class SmartcnSentenceEngine exten
         private Sentence sentence = null;
 
         protected AnalyzedTextSentenceTokenizer(AnalysedText at) {
+            super(new StringReader(at.getText().toString()));
             this.at = at;
             sentences = at.getSentences();
         }
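Note: the added sentences.reset() call satisfies the Lucene 4 TokenStream contract, which requires reset() before the first incrementToken(). A minimal consumption sketch under that assumption; the class and method names below are illustrative, only the TokenStream/OffsetAttribute calls are actual Lucene APIs:

    import java.io.IOException;

    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;

    public class TokenStreamContractSketch {
        /** Consumes any TokenStream following the reset/incrementToken/end/close workflow. */
        public static void consume(TokenStream stream) throws IOException {
            OffsetAttribute offset = stream.addAttribute(OffsetAttribute.class);
            stream.reset(); // required before the first incrementToken() since Lucene 4
            try {
                while (stream.incrementToken()) {
                    System.out.println(offset.startOffset() + ".." + offset.endOffset());
                }
                stream.end(); // records the final offset state
            } finally {
                stream.close();
            }
        }
    }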

Modified: stanbol/trunk/enhancement-engines/smartcn-token/src/main/java/org/apache/stanbol/enhancer/engines/smartcn/impl/SmartcnTokenizerEngine.java
URL: http://svn.apache.org/viewvc/stanbol/trunk/enhancement-engines/smartcn-token/src/main/java/org/apache/stanbol/enhancer/engines/smartcn/impl/SmartcnTokenizerEngine.java?rev=1458703&r1=1458702&r2=1458703&view=diff
==============================================================================
--- stanbol/trunk/enhancement-engines/smartcn-token/src/main/java/org/apache/stanbol/enhancer/engines/smartcn/impl/SmartcnTokenizerEngine.java (original)
+++ stanbol/trunk/enhancement-engines/smartcn-token/src/main/java/org/apache/stanbol/enhancer/engines/smartcn/impl/SmartcnTokenizerEngine.java Wed Mar 20 09:50:09 2013
@@ -20,6 +20,7 @@ import static org.apache.stanbol.enhance
 import static org.apache.stanbol.enhancer.nlp.utils.NlpEngineHelper.initAnalysedText;
 
 import java.io.IOException;
+import java.io.StringReader;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -170,6 +171,7 @@ public class SmartcnTokenizerEngine exte
         //now the tokens
         TokenStream tokens = new WordTokenFilter(new AnalyzedTextSentenceTokenizer(at));
         try {
+        	tokens.reset();
             while(tokens.incrementToken()){
                 OffsetAttribute offset = tokens.addAttribute(OffsetAttribute.class);
                 Token t = at.addToken(offset.startOffset(), offset.endOffset());
@@ -219,6 +221,7 @@ public class SmartcnTokenizerEngine exte
         private Sentence sentence = null;
 
         protected AnalyzedTextSentenceTokenizer(AnalysedText at) {
+            super(new StringReader(at.getText().toString()));
             this.at = at;
             sentences = at.getSentences();
         }
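Note: the added super(new StringReader(...)) call reflects that the Lucene 4 Tokenizer base class expects its input Reader at construction time. A minimal sketch of that pattern, assuming stock Lucene 4.x APIs; the class name is illustrative and not part of this commit:

    import java.io.StringReader;

    import org.apache.lucene.analysis.Tokenizer;

    /** Sketch: a Lucene 4 Tokenizer subclass hands its input Reader to super(). */
    public abstract class ReaderBackedTokenizerSketch extends Tokenizer {
        protected ReaderBackedTokenizerSketch(CharSequence text) {
            super(new StringReader(text.toString())); // Tokenizer(Reader) constructor
        }
    }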

Modified: stanbol/trunk/enhancement-engines/topic/engine/pom.xml
URL: http://svn.apache.org/viewvc/stanbol/trunk/enhancement-engines/topic/engine/pom.xml?rev=1458703&r1=1458702&r2=1458703&view=diff
==============================================================================
--- stanbol/trunk/enhancement-engines/topic/engine/pom.xml (original)
+++ stanbol/trunk/enhancement-engines/topic/engine/pom.xml Wed Mar 20 09:50:09 2013
@@ -218,7 +218,6 @@
     <dependency>
       <groupId>org.apache.httpcomponents</groupId>
       <artifactId>httpcore-osgi</artifactId>
-      <version>4.0.1</version>
       <scope>test</scope>
     </dependency>
     <dependency>
@@ -294,15 +293,16 @@
       <artifactId>org.apache.stanbol.enhancer.servicesapi</artifactId>
       <version>0.10.0</version>
     </dependency>
-    <dependency>
+    <!-- already a transitive dependency of commons.solr.managed
+      <dependency>
       <groupId>org.apache.stanbol</groupId>
       <artifactId>org.apache.stanbol.commons.solr.core</artifactId>
-      <version>0.11.0</version>
-    </dependency>
+      <version>0.12.0-SNAPSHOT</version>
+    </dependency> -->
     <dependency>
       <groupId>org.apache.stanbol</groupId>
       <artifactId>org.apache.stanbol.commons.solr.managed</artifactId>
-      <version>0.11.0</version>
+      <version>0.12.0-SNAPSHOT</version>
     </dependency>    
   </dependencies>
 

Propchange: stanbol/trunk/enhancement-engines/uima/uimalocal-template/
------------------------------------------------------------------------------
  Merged /stanbol/branches/stanbol-solr4/enhancement-engines/uima/uimalocal-template:r1455112-1458685

Propchange: stanbol/trunk/enhancement-engines/uima/uimaremote/
------------------------------------------------------------------------------
  Merged /stanbol/branches/stanbol-solr4/enhancement-engines/uima/uimaremote:r1455112-1458685

Propchange: stanbol/trunk/enhancement-engines/uima/uimatotriples/
------------------------------------------------------------------------------
  Merged /stanbol/branches/stanbol-solr4/enhancement-engines/uima/uimatotriples:r1455112-1458685

Propchange: stanbol/trunk/enhancer/
------------------------------------------------------------------------------
  Merged /stanbol/branches/stanbol-solr4/enhancer:r1455112-1458685

Propchange: stanbol/trunk/enhancer/generic/servicesapi/
------------------------------------------------------------------------------
  Merged /stanbol/branches/stanbol-solr4/enhancer/generic/servicesapi:r1455112-1458685

Modified: stanbol/trunk/entityhub/generic/test/src/main/java/org/apache/stanbol/entityhub/test/it/AssertEntityhubJson.java
URL: http://svn.apache.org/viewvc/stanbol/trunk/entityhub/generic/test/src/main/java/org/apache/stanbol/entityhub/test/it/AssertEntityhubJson.java?rev=1458703&r1=1458702&r2=1458703&view=diff
==============================================================================
--- stanbol/trunk/entityhub/generic/test/src/main/java/org/apache/stanbol/entityhub/test/it/AssertEntityhubJson.java (original)
+++ stanbol/trunk/entityhub/generic/test/src/main/java/org/apache/stanbol/entityhub/test/it/AssertEntityhubJson.java Wed Mar 20 09:50:09 2013
@@ -63,6 +63,7 @@ public class AssertEntityhubJson {
      * {@link QueryTestCase#getExpectedStatus()} is a 2xx status code.
      */
     public static void assertQueryResults(RequestExecutor re, QueryTestCase test) throws JSONException{
+    	log.debug("Assert Query Results for test {}",test.getContent());
         re.assertStatus(test.getExpectedStatus());
         re.assertContentType("application/json"); //currently only application/json is supported
         if(!test.expectsSuccess()){

Modified: stanbol/trunk/entityhub/generic/test/src/main/java/org/apache/stanbol/entityhub/test/it/EntityhubTestBase.java
URL: http://svn.apache.org/viewvc/stanbol/trunk/entityhub/generic/test/src/main/java/org/apache/stanbol/entityhub/test/it/EntityhubTestBase.java?rev=1458703&r1=1458702&r2=1458703&view=diff
==============================================================================
--- stanbol/trunk/entityhub/generic/test/src/main/java/org/apache/stanbol/entityhub/test/it/EntityhubTestBase.java (original)
+++ stanbol/trunk/entityhub/generic/test/src/main/java/org/apache/stanbol/entityhub/test/it/EntityhubTestBase.java Wed Mar 20 09:50:09 2013
@@ -97,6 +97,14 @@ public abstract class EntityhubTestBase 
                             referencedSite));
                     }
                 }
+                //this ensures that all sites are initialized
+                for(String referencedSite : referencedSites){
+	                re = executor.execute(
+	                        builder.buildGetRequest("/entityhub/site/"+referencedSite +
+	                        		"/entity?id=urn:does:not:exist:f82js95xsig39s.23987")
+	                        .withHeader("Accept", "application/json"));
+	                re.assertStatus(404);
+                }
                 log.info("Entityhub services checked, all present");
                 return true;
             }

Modified: stanbol/trunk/entityhub/indexing/destination/solryard/src/test/resources/testConfigs/withSolrConf/indexing/config/simple/conf/mapping-ISOLatin1Accent.txt
URL: http://svn.apache.org/viewvc/stanbol/trunk/entityhub/indexing/destination/solryard/src/test/resources/testConfigs/withSolrConf/indexing/config/simple/conf/mapping-ISOLatin1Accent.txt?rev=1458703&r1=1458702&r2=1458703&view=diff
==============================================================================
--- stanbol/trunk/entityhub/indexing/destination/solryard/src/test/resources/testConfigs/withSolrConf/indexing/config/simple/conf/mapping-ISOLatin1Accent.txt (original)
+++ stanbol/trunk/entityhub/indexing/destination/solryard/src/test/resources/testConfigs/withSolrConf/indexing/config/simple/conf/mapping-ISOLatin1Accent.txt Wed Mar 20 09:50:09 2013
@@ -1,246 +1,246 @@
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Syntax:
-#   "source" => "target"
-#     "source".length() > 0 (source cannot be empty.)
-#     "target".length() >= 0 (target can be empty.)
-
-# example:
-#   "À" => "A"
-#   "\u00C0" => "A"
-#   "\u00C0" => "\u0041"
-#   "ß" => "ss"
-#   "\t" => " "
-#   "\n" => ""
-
-# À => A
-"\u00C0" => "A"
-
-# Á => A
-"\u00C1" => "A"
-
-# Â => A
-"\u00C2" => "A"
-
-# Ã => A
-"\u00C3" => "A"
-
-# Ä => A
-"\u00C4" => "A"
-
-# Å => A
-"\u00C5" => "A"
-
-# Æ => AE
-"\u00C6" => "AE"
-
-# Ç => C
-"\u00C7" => "C"
-
-# È => E
-"\u00C8" => "E"
-
-# É => E
-"\u00C9" => "E"
-
-# Ê => E
-"\u00CA" => "E"
-
-# Ë => E
-"\u00CB" => "E"
-
-# Ì => I
-"\u00CC" => "I"
-
-# Í => I
-"\u00CD" => "I"
-
-# Î => I
-"\u00CE" => "I"
-
-# Ï => I
-"\u00CF" => "I"
-
-# IJ => IJ
-"\u0132" => "IJ"
-
-# Ð => D
-"\u00D0" => "D"
-
-# Ñ => N
-"\u00D1" => "N"
-
-# Ò => O
-"\u00D2" => "O"
-
-# Ó => O
-"\u00D3" => "O"
-
-# Ô => O
-"\u00D4" => "O"
-
-# Õ => O
-"\u00D5" => "O"
-
-# Ö => O
-"\u00D6" => "O"
-
-# Ø => O
-"\u00D8" => "O"
-
-# Œ => OE
-"\u0152" => "OE"
-
-# Þ
-"\u00DE" => "TH"
-
-# Ù => U
-"\u00D9" => "U"
-
-# Ú => U
-"\u00DA" => "U"
-
-# Û => U
-"\u00DB" => "U"
-
-# Ü => U
-"\u00DC" => "U"
-
-# Ý => Y
-"\u00DD" => "Y"
-
-# Ÿ => Y
-"\u0178" => "Y"
-
-# à => a
-"\u00E0" => "a"
-
-# á => a
-"\u00E1" => "a"
-
-# â => a
-"\u00E2" => "a"
-
-# ã => a
-"\u00E3" => "a"
-
-# ä => a
-"\u00E4" => "a"
-
-# å => a
-"\u00E5" => "a"
-
-# æ => ae
-"\u00E6" => "ae"
-
-# ç => c
-"\u00E7" => "c"
-
-# è => e
-"\u00E8" => "e"
-
-# é => e
-"\u00E9" => "e"
-
-# ê => e
-"\u00EA" => "e"
-
-# ë => e
-"\u00EB" => "e"
-
-# ì => i
-"\u00EC" => "i"
-
-# í => i
-"\u00ED" => "i"
-
-# î => i
-"\u00EE" => "i"
-
-# ï => i
-"\u00EF" => "i"
-
-# ij => ij
-"\u0133" => "ij"
-
-# ð => d
-"\u00F0" => "d"
-
-# ñ => n
-"\u00F1" => "n"
-
-# ò => o
-"\u00F2" => "o"
-
-# ó => o
-"\u00F3" => "o"
-
-# ô => o
-"\u00F4" => "o"
-
-# õ => o
-"\u00F5" => "o"
-
-# ö => o
-"\u00F6" => "o"
-
-# ø => o
-"\u00F8" => "o"
-
-# œ => oe
-"\u0153" => "oe"
-
-# ß => ss
-"\u00DF" => "ss"
-
-# þ => th
-"\u00FE" => "th"
-
-# ù => u
-"\u00F9" => "u"
-
-# ú => u
-"\u00FA" => "u"
-
-# û => u
-"\u00FB" => "u"
-
-# ü => u
-"\u00FC" => "u"
-
-# ý => y
-"\u00FD" => "y"
-
-# ÿ => y
-"\u00FF" => "y"
-
-# ff => ff
-"\uFB00" => "ff"
-
-# fi => fi
-"\uFB01" => "fi"
-
-# fl => fl
-"\uFB02" => "fl"
-
-# ffi => ffi
-"\uFB03" => "ffi"
-
-# ffl => ffl
-"\uFB04" => "ffl"
-
-# ſt => ft
-"\uFB05" => "ft"
-
-# st => st
-"\uFB06" => "st"
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Syntax:
+#   "source" => "target"
+#     "source".length() > 0 (source cannot be empty.)
+#     "target".length() >= 0 (target can be empty.)
+
+# example:
+#   "À" => "A"
+#   "\u00C0" => "A"
+#   "\u00C0" => "\u0041"
+#   "ß" => "ss"
+#   "\t" => " "
+#   "\n" => ""
+
+# À => A
+"\u00C0" => "A"
+
+# Á => A
+"\u00C1" => "A"
+
+# Â => A
+"\u00C2" => "A"
+
+# Ã => A
+"\u00C3" => "A"
+
+# Ä => A
+"\u00C4" => "A"
+
+# Å => A
+"\u00C5" => "A"
+
+# Æ => AE
+"\u00C6" => "AE"
+
+# Ç => C
+"\u00C7" => "C"
+
+# È => E
+"\u00C8" => "E"
+
+# É => E
+"\u00C9" => "E"
+
+# Ê => E
+"\u00CA" => "E"
+
+# Ë => E
+"\u00CB" => "E"
+
+# Ì => I
+"\u00CC" => "I"
+
+# Í => I
+"\u00CD" => "I"
+
+# Î => I
+"\u00CE" => "I"
+
+# Ï => I
+"\u00CF" => "I"
+
+# IJ => IJ
+"\u0132" => "IJ"
+
+# Ð => D
+"\u00D0" => "D"
+
+# Ñ => N
+"\u00D1" => "N"
+
+# Ò => O
+"\u00D2" => "O"
+
+# Ó => O
+"\u00D3" => "O"
+
+# Ô => O
+"\u00D4" => "O"
+
+# Õ => O
+"\u00D5" => "O"
+
+# Ö => O
+"\u00D6" => "O"
+
+# Ø => O
+"\u00D8" => "O"
+
+# Œ => OE
+"\u0152" => "OE"
+
+# Þ
+"\u00DE" => "TH"
+
+# Ù => U
+"\u00D9" => "U"
+
+# Ú => U
+"\u00DA" => "U"
+
+# Û => U
+"\u00DB" => "U"
+
+# Ü => U
+"\u00DC" => "U"
+
+# Ý => Y
+"\u00DD" => "Y"
+
+# Ÿ => Y
+"\u0178" => "Y"
+
+# à => a
+"\u00E0" => "a"
+
+# á => a
+"\u00E1" => "a"
+
+# â => a
+"\u00E2" => "a"
+
+# ã => a
+"\u00E3" => "a"
+
+# ä => a
+"\u00E4" => "a"
+
+# å => a
+"\u00E5" => "a"
+
+# æ => ae
+"\u00E6" => "ae"
+
+# ç => c
+"\u00E7" => "c"
+
+# è => e
+"\u00E8" => "e"
+
+# é => e
+"\u00E9" => "e"
+
+# ê => e
+"\u00EA" => "e"
+
+# ë => e
+"\u00EB" => "e"
+
+# ì => i
+"\u00EC" => "i"
+
+# í => i
+"\u00ED" => "i"
+
+# î => i
+"\u00EE" => "i"
+
+# ï => i
+"\u00EF" => "i"
+
+# ij => ij
+"\u0133" => "ij"
+
+# ð => d
+"\u00F0" => "d"
+
+# ñ => n
+"\u00F1" => "n"
+
+# ò => o
+"\u00F2" => "o"
+
+# ó => o
+"\u00F3" => "o"
+
+# ô => o
+"\u00F4" => "o"
+
+# õ => o
+"\u00F5" => "o"
+
+# ö => o
+"\u00F6" => "o"
+
+# ø => o
+"\u00F8" => "o"
+
+# œ => oe
+"\u0153" => "oe"
+
+# ß => ss
+"\u00DF" => "ss"
+
+# þ => th
+"\u00FE" => "th"
+
+# ù => u
+"\u00F9" => "u"
+
+# ú => u
+"\u00FA" => "u"
+
+# û => u
+"\u00FB" => "u"
+
+# ü => u
+"\u00FC" => "u"
+
+# ý => y
+"\u00FD" => "y"
+
+# ÿ => y
+"\u00FF" => "y"
+
+# ff => ff
+"\uFB00" => "ff"
+
+# fi => fi
+"\uFB01" => "fi"
+
+# fl => fl
+"\uFB02" => "fl"
+
+# ffi => ffi
+"\uFB03" => "ffi"
+
+# ffl => ffl
+"\uFB04" => "ffl"
+
+# ſt => ft
+"\uFB05" => "ft"
+
+# st => st
+"\uFB06" => "st"

Modified: stanbol/trunk/entityhub/indexing/destination/solryard/src/test/resources/testConfigs/withSolrConf/indexing/config/simple/conf/protwords.txt
URL: http://svn.apache.org/viewvc/stanbol/trunk/entityhub/indexing/destination/solryard/src/test/resources/testConfigs/withSolrConf/indexing/config/simple/conf/protwords.txt?rev=1458703&r1=1458702&r2=1458703&view=diff
==============================================================================
--- stanbol/trunk/entityhub/indexing/destination/solryard/src/test/resources/testConfigs/withSolrConf/indexing/config/simple/conf/protwords.txt (original)
+++ stanbol/trunk/entityhub/indexing/destination/solryard/src/test/resources/testConfigs/withSolrConf/indexing/config/simple/conf/protwords.txt Wed Mar 20 09:50:09 2013
@@ -1,21 +1,19 @@
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#-----------------------------------------------------------------------
-# Use a protected word file to protect against the stemmer reducing two
-# unrelated words to the same base word.
-
-# Some non-words that normally won't be encountered,
-# just to test that they won't be stemmed.
-dontstems
-zwhacky
-
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#-----------------------------------------------------------------------
+# Use a protected word file to protect against the stemmer reducing two
+# unrelated words to the same base word.
+
+# Some non-words that normally won't be encountered,
+# just to test that they won't be stemmed.
+

Modified: stanbol/trunk/entityhub/indexing/destination/solryard/src/test/resources/testConfigs/withSolrConf/indexing/config/simple/conf/schema.xml
URL: http://svn.apache.org/viewvc/stanbol/trunk/entityhub/indexing/destination/solryard/src/test/resources/testConfigs/withSolrConf/indexing/config/simple/conf/schema.xml?rev=1458703&r1=1458702&r2=1458703&view=diff
==============================================================================
--- stanbol/trunk/entityhub/indexing/destination/solryard/src/test/resources/testConfigs/withSolrConf/indexing/config/simple/conf/schema.xml (original)
+++ stanbol/trunk/entityhub/indexing/destination/solryard/src/test/resources/testConfigs/withSolrConf/indexing/config/simple/conf/schema.xml Wed Mar 20 09:50:09 2013
@@ -32,12 +32,12 @@
  to specific requirements. See the comments within this schema for more
  details!
 
- For more information, on how to customize the Solr schema.xml in general, 
- please see http://wiki.apache.org/solr/SchemaXml.
+ For more information on how to customize this file, please see
+ http://wiki.apache.org/solr/SchemaXml
 
 -->
 
-<schema name="Apache Stanbol SolrYard Schema" version="1.2">
+<schema name="Apache Stanbol SolrYard Schema" version="1.5">
   <!--
     The SolrYard supports a list of types that is reflected by
     "fieldType" specifications within this schema.
@@ -50,14 +50,17 @@
       used for ISBN numbers, article numbers, string representations of
       unsupported data types ...
     -->
-    <fieldType name="string" class="solr.StrField" sortMissingLast="true" omitNorms="false"/>
+    <!-- The StrField type is not analyzed, but indexed/stored verbatim. -->
+    <fieldType name="string" class="solr.StrField" sortMissingLast="true" omitNorms="false"/>    
+
     <!-- 
       This can be used as alternative to "string" to enable case insensitive
       searches on string values.
       The KeywordTokenizerFactory ensures that the whole string is preserved as
       a single token.
     -->
-    <fieldType name="lowercase" class="solr.TextField" positionIncrementGap="100">
+    <!-- lowercases the entire field value, keeping it as a single token.  -->
+    <fieldType name="lowercase" class="solr.TextField" positionIncrementGap="100" omitNorms="false">
       <analyzer>
         <tokenizer class="solr.KeywordTokenizerFactory"/>
         <filter class="solr.LowerCaseFilterFactory" />
@@ -70,38 +73,51 @@
     <!--Binary data type. The data should be sent/retrieved in as Base64 encoded Strings.
         Currently not used by the SolrYard implementation, but reserved for future use. -->
     <fieldtype name="binary" class="solr.BinaryField"/>
-    <!--
-      Default numeric and date field types. By default used to index numeric values.
-      Note that the "solr.TrieIntField" does support indexing values at various
-      levels of precision to accelerate range queries. However the
-      precisionStep of 0 used by this fieldTypes disables this feature.
-      Change presisionStep to values > 0 to activate hierarchival indexing
-      for all numeric fields of that types. See Solr documentation for
-      suitable values and examples.
-    -->
-    <fieldType name="int" class="solr.TrieIntField" precisionStep="0" omitNorms="false" positionIncrementGap="0"/>
-    <fieldType name="float" class="solr.TrieFloatField" precisionStep="0" omitNorms="false" positionIncrementGap="0"/>
-    <fieldType name="long" class="solr.TrieLongField" precisionStep="0" omitNorms="false" positionIncrementGap="0"/>
-    <fieldType name="double" class="solr.TrieDoubleField" precisionStep="0" omitNorms="false" positionIncrementGap="0"/>
-    <fieldType name="date" class="solr.TrieDateField" omitNorms="false" precisionStep="0" positionIncrementGap="0"/>
+    <fieldType name="int" class="solr.TrieIntField" precisionStep="0" positionIncrementGap="0"/>
+    <fieldType name="float" class="solr.TrieFloatField" precisionStep="0" positionIncrementGap="0"/>
+    <fieldType name="long" class="solr.TrieLongField" precisionStep="0" positionIncrementGap="0"/>
+    <fieldType name="double" class="solr.TrieDoubleField" precisionStep="0" positionIncrementGap="0"/>
+    <fieldType name="tint" class="solr.TrieIntField" precisionStep="8" positionIncrementGap="0"/>
+    <fieldType name="tfloat" class="solr.TrieFloatField" precisionStep="8" positionIncrementGap="0"/>
+    <fieldType name="tlong" class="solr.TrieLongField" precisionStep="8" positionIncrementGap="0"/>
+    <fieldType name="tdouble" class="solr.TrieDoubleField" precisionStep="8" positionIncrementGap="0"/>
+ 
+    <fieldType name="date" class="solr.TrieDateField" precisionStep="0" positionIncrementGap="0"/>
+    <fieldType name="tdate" class="solr.TrieDateField" precisionStep="6" positionIncrementGap="0"/>
+    
+    <fieldType name="random" class="solr.RandomSortField" indexed="true" />
 
+    <!-- Special non-natural language field types -->
+    
+    <!-- This point type indexes the coordinates as separate fields (subFields)
+     If subFieldType is defined, it references a type, and a dynamic field
+     definition is created matching *___<typename>.  Alternately, if
+     subFieldSuffix is defined, that is used to create the subFields.
+     Example: if subFieldType="double", then the coordinates would be
+     indexed in fields myloc_0___double,myloc_1___double.
+     Example: if subFieldSuffix="_d" then the coordinates would be indexed
+     in fields myloc_0_d,myloc_1_d
+     The subFields are an implementation detail of the fieldType, and end
+     users normally should not need to know about them.
+     -->
+    <fieldType name="point" class="solr.PointType" dimension="2" subFieldSuffix="_d"/>
+    
+    <!-- A specialized field for geospatial search. If indexed, this fieldType must not be multivalued. -->
+    <fieldType name="location" class="solr.LatLonType" subFieldSuffix="_coordinate"/>
+    
+    <!-- An alternative geospatial field type new to Solr 4.  It supports multiValued and polygon shapes.
+     For more information about this and other Spatial fields new to Solr 4, see:
+     http://wiki.apache.org/solr/SolrAdaptersForLuceneSpatial4
+     -->
+    <fieldType name="location_rpt" class="solr.SpatialRecursivePrefixTreeFieldType"
+    geo="true" distErrPct="0.025" maxDistErr="0.000009" units="degrees" />
+    
+    
     <!--
-      Numeric and date field types that do activate indexing values at various
-      levels of precision to accelerate range queries.
-      This can be used to activate hierarchival indexing for specific
-      fields. See Notes within the field section.
-    -->
-    <fieldType name="tint" class="solr.TrieIntField" precisionStep="8" omitNorms="false" positionIncrementGap="0"/>
-    <fieldType name="tfloat" class="solr.TrieFloatField" precisionStep="8" omitNorms="false" positionIncrementGap="0"/>
-    <fieldType name="tlong" class="solr.TrieLongField" precisionStep="8" omitNorms="false" positionIncrementGap="0"/>
-    <fieldType name="tdouble" class="solr.TrieDoubleField" precisionStep="8" omitNorms="false" positionIncrementGap="0"/>
-    <fieldType name="tdate" class="solr.TrieDateField" omitNorms="false" precisionStep="6" positionIncrementGap="0"/>
-
-    <!-- 
       Natural Language Texts
-      
+     
       Indexing of natural language texts are supported by the solr.TextField class that
-      allows the specification of custom text analyzers specified as a tokenizer and a 
+      allows the specification of custom text analyzers specified as a tokenizer and a
       list of token filters.
       
       For more info on customizing your analyzer chain, please see
@@ -117,137 +133,93 @@
       together with string values within a special field to support searches for
       texts without an specified language.
     -->
+
     <!-- 
-      A general unstemmed text field - good if one does not know the language of the field.
-      This is used as the default fieldType for fields that store values of different
-      languages.
-      It is also the default fieldType for languages that do not define special fieldTypes.
-    -->
-    <fieldType name="textgen" class="solr.TextField" positionIncrementGap="100">
+         ENGLISH
+     
+         This is the default fieldType used for english language texts. It is
+         based on the "text_en_splitting_tight" of the default Solr 4.1 distribution
+         
+         Less flexible matching, but less false matches.  Probably not ideal for product names,
+         but may be good for SKUs.  Can insert dashes in the wrong place and still match. -->
+    <fieldType name="text_en" class="solr.TextField" positionIncrementGap="100" omitNorms="false">
       <analyzer type="index">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" enablePositionIncrements="true" />
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="0"/>
+        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.HyphenatedWordsFilterFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords_en.txt"/>
+        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
+        <filter class="solr.EnglishMinimalStemFilterFactory"/>
+        <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" enablePositionIncrements="true"/>
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="0"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-      </analyzer>
-    </fieldType>
-    
-    <!-- 
-      A text field that only splits on whitespace for exact matching of words.
-      Currently not used. May be used as an alternative to the textgen fieldType.
-    -->
-    <!--
-    <fieldType name="text_ws" class="solr.TextField" positionIncrementGap="100">
-      <analyzer>
-        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-      </analyzer>
-    </fieldType>
-    -->
-    
-    <!-- 
-      This is the default fieldType used for english language texts.
-      
-      Less flexible matching than the text_en field type, but less false matches.  
-      Probably not ideal for product names, but may be good for SKUs. 
-      Can insert dashes in the wrong place and still match.
-    -->
-    <fieldType name="text_en_Tight" class="solr.TextField" positionIncrementGap="100" >
-      <analyzer>
-        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
         <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords_en.txt"/>
         <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.SnowballPorterFilterFactory" language="English" protected="protwords.txt"/>
-        <!-- this filter can remove any duplicate tokens that appear at the same position - sometimes
-             possible with WordDelimiterFilter in conjuncton with stemming. -->
+        <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
+        <filter class="solr.EnglishMinimalStemFilterFactory"/>
         <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
       </analyzer>
     </fieldType>
 
-
     <!-- 
-      This can be used as an alternative to the "text_en_Tight" fieldTpye for
-      english langauge texts.
-      
-      A text field that uses WordDelimiterFilter to enable splitting and matching of
-      words on case-change, alpha numeric boundaries, and non-alphanumeric chars,
-      so that a query of "wifi" or "wi fi" could match a document containing "Wi-Fi".
-      Synonyms and stopwords are customized by external files, and stemming is enabled.
-    -->
-    <!--
-    <fieldType name="text_en" class="solr.TextField" positionIncrementGap="100">
-      <analyzer type="index">
-        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" enablePositionIncrements="true"/>
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.SnowballPorterFilterFactory" language="English" protected="protwords.txt"/>
-      </analyzer>
-      <analyzer type="query">
-        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" enablePositionIncrements="true"/>
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
+         GENERIC (no specific language support)
+     
+         The default for any language without a special field definition.
+         
+         Uses the ICUTokenizer and tries to convert alphabetic, numeric, and symbolic Unicode characters which 
+         are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII 
+         equivalents, if one exists. (STANBOL-
+         (see http://lucene.apache.org/java/2_9_1/api/all/org/apache/lucene/analysis/ASCIIFoldingFilter.html)
+
+     	-->
+    <fieldType name="textgen" class="solr.TextField" positionIncrementGap="100" omitNorms="false">
+      <analyzer>
+        <tokenizer class="solr.ICUTokenizerFactory"/>
+        <filter class="solr.ASCIIFoldingFilterFactory"/>
+        <filter class="solr.HyphenatedWordsFilterFactory"/>
         <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.SnowballPorterFilterFactory" language="English" protected="protwords.txt"/>
       </analyzer>
     </fieldType>
-    -->
-    
-    <!--
-      The SolrYard allows leading Wildcards (e.g. "*aris"). To provide
-      good query performance for such queries one need to configure
-      fieldTypes that use the ReversedWildcardFilterFactory as shown by
-      this example.
-      See Solr documentation for details
-      
-      A general unstemmed text field that indexes tokens normally and also
-      reversed (via ReversedWildcardFilterFactory), to enable more efficient 
-	  leading wildcard queries. 
-    -->
-    <!--
-    <fieldType name="text_rev" class="solr.TextField" positionIncrementGap="100">
-      <analyzer type="index">
-        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" enablePositionIncrements="true" />
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="0"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.ReversedWildcardFilterFactory" withOriginal="true" maxPosAsterisk="3" maxPosQuestion="2" maxFractionAsterisk="0.33"/>
-      </analyzer>
-      <analyzer type="query">
-        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" enablePositionIncrements="true"/>
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="0"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
+
+
+    <!-- A KeywordTokenizer that does not include some properties of the source text.
+         
+         TODO:
+          - This might be useful for searching labels
+          - Rename to label if used for that
+          - Add 0-9 to the regex pattern to preserve numbers
+         
+      -->
+    <fieldType name="alphaOnlySort" class="solr.TextField" sortMissingLast="true" omitNorms="false">
+      <analyzer>
+        <!-- KeywordTokenizer does not tokenize -->
+        <tokenizer class="solr.KeywordTokenizerFactory"/>
+        <filter class="solr.LowerCaseFilterFactory" />
+        <filter class="solr.TrimFilterFactory" />
+        <filter class="solr.PatternReplaceFilterFactory" pattern="([^a-z])" replacement="" replace="all" />
       </analyzer>
     </fieldType>
-    -->
-    <!-- charFilter + WhitespaceTokenizer  -->
-    <!--
-    <fieldType name="textCharNorm" class="solr.TextField" positionIncrementGap="100" >
+    
+    <fieldType name="text_path" class="solr.TextField" positionIncrementGap="100" omitNorms="false">
       <analyzer>
-        <charFilter class="solr.MappingCharFilterFactory" mapping="mapping-ISOLatin1Accent.txt"/>
-        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+        <tokenizer class="solr.PathHierarchyTokenizerFactory"/>
       </analyzer>
     </fieldType>
-    -->
-
-    <!--
-      This can be used to deactivate some functionality of the SolrYard or
-      to configure that some fields of a data set are not stored nor indexed
-      regardless of the Apache Stanbol Entityhub configuration!
-    --> 
-    <fieldtype name="ignored" stored="false" indexed="false" multiValued="true" class="solr.StrField" /> 
 
+    <!-- since fields of this type are by default not stored or indexed,
+         any data added to them will be ignored outright.  --> 
+    <fieldtype name="ignored" stored="false" indexed="false" multiValued="true" class="solr.StrField" />
+
+    <!-- Spatial features are not yet supported by the Entityhub
+    <fieldType name="point" class="solr.PointType" dimension="2" subFieldSuffix="_d"/>
+    <fieldType name="location" class="solr.LatLonType" subFieldSuffix="_coordinate"/>
+    <fieldtype name="geohash" class="solr.GeoHashField"/>
+     -->
  </types>
 
 
@@ -267,7 +239,7 @@
     (via copyField). This is used as default search field.
     The type may be changed.
      -->
-   <field name="_text" type="textgen" indexed="true" stored="false" multiValued="true"/>
+   <field name="_text" type="textgen" indexed="true" stored="false" multiValued="true" termVectors="true"/>
    <!-- 
      used to store all references of the document (via copyField).
      This field may be used to search for related entities.
@@ -280,6 +252,9 @@
      Do not change this definition!
    -->
    <field name="_domain" type="string" indexed="true" stored="false" multiValued="true"/>
+   
+   <!-- defined to fulfill the fields required by SolrCloud (see http://wiki.apache.org/solr/SolrCloud#schema.xml) -->
+   <field name="_version_" type="long" indexed="true" stored="true" multiValued="false"/>
 
    <!-- 
      Dynamic field definitions (used if a field name is not found)
@@ -312,12 +287,17 @@
    <dynamicField name="dou/*"  type="double"  indexed="true"  stored="true" multiValued="true"/>
    <dynamicField name="cal/*"  type="date"    indexed="true"  stored="true" multiValued="true"/>
    <dynamicField name="dur/*"  type="string"  indexed="true"  stored="true" multiValued="true"/>
-   <!-- 
+   <!-- Additional dynamic fields for geospatial search (currently not supported by the SolrYard) -->
+   <dynamicField name="coord/*"  type="tdouble" indexed="true"  stored="false" />
+   <dynamicField name="loc/*"    type="location" indexed="true" stored="true"/>
+   <dynamicField name="geo/*"    type="location_rpt"  indexed="true" stored="true"  multiValued="true" />
+
+   <!--
      String fields that are not natural language
      To support case insensitive searches in such fields change 
      the type to "lowercase"
    -->
-   <dynamicField name="str/*"  type="string"  indexed="true"  stored="true" multiValued="true"/>
+   <dynamicField name="str/*"  type="string"  indexed="true"  stored="true" multiValued="true" omitNorms="false"/>
    <!-- 
      references are values that represent IDs of other resources.
      Typically this will store URIs but in principle also other IDs
@@ -349,15 +329,16 @@
          en-GB and one for other english text
    -->
    <!-- 
-     Dynamic field for english languages.
-     Note that the prefix "@en*" matches also "@en-GB" and "@en-US"
+    Dynamic field for English languages.
+    Note that the prefix "@en*" matches also "@en-GB" and "@en-US"
    -->
-   <dynamicField name="@en*"  type="text_en_Tight" indexed="true" stored="true" multiValued="true"/>
-   <!-- 
-     The "@*" catches all the other languages including "@/" 
+   <dynamicField name="@en*"  type="text_en" indexed="true" stored="true" multiValued="true" omitNorms="false"/>
+
+   <!--
+     The "@*" catches all the other languages including "@/"
      (default language) used for texts without a defined language
    -->
-   <dynamicField name="@*"  type="textgen"  indexed="true"  stored="true" multiValued="true"/>
+   <dynamicField name="@*"  type="textgen"  indexed="true"  stored="true" multiValued="true" omitNorms="false"/>
 
    <!--
      To add special configurations for specific fields one
@@ -400,14 +381,14 @@
      This field need not to be stored. The type can be changed to alternatives
      as described in the types section of this configuration.
    -->
-   <dynamicField name="_!@*"  type="textgen"  indexed="true"  stored="false" multiValued="true"/>
+   <dynamicField name="_!@*"  type="textgen"  indexed="true"  stored="false" multiValued="true" omitNorms="false"/>
    <!-- 
      fields starting with "_config/" are used to store configurations about how the
      index was created within the index (e.g. used namespace prefixes).
      Do not change this definition!
    -->
    <dynamicField name="_config/*" type="string" indexed="false" multiValued="true"/>
-      
+   
  </fields>
 
  <!-- 
@@ -416,20 +397,12 @@
  <uniqueKey>uri</uniqueKey>
 
  <!-- 
-   field for the QueryParser to use when an explicit fieldname is absent.
-   The SolrYard does currently not take advantage of this. However it can
-   be used when directly accessing the SolrYard.
- -->
- <defaultSearchField>_text</defaultSearchField>
+   defaultSearchField is DEPRECATED as of Solr 4
+ <defaultSearchField>_text</defaultSearchField> -->
 
  <!--
-   The SolrYard explizitly adds AND and OR for all boolean terms in
-   generated queries. So changing that should have no influence on
-   the SolrYard (not tested) 
-   
-   SolrQueryParser configuration: defaultOperator="AND|OR" 
- -->
- <solrQueryParser defaultOperator="OR"/>
+   solrQueryParser defaultOperator is DEPRECATED as of Solr 4
+ <solrQueryParser defaultOperator="OR"/> -->
 
   <!--
     The SolrYard Implementation assumes the following copyField commands.
@@ -454,5 +427,7 @@
      all references to it)
    -->
    <copyField source="ref/*" dest="_ref"/>
-   
+	
+
+
 </schema>
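
Note on the two DEPRECATED markers above: with Solr 4 the default search field and default operator are no longer fixed in schema.xml but passed per request (or configured on a request handler). A minimal SolrJ sketch under that assumption; _text is the catch-all field defined in this schema and the class name is illustrative:

    import org.apache.solr.client.solrj.SolrQuery;

    public class DefaultFieldSketch {
        /** Builds a query that supplies what defaultSearchField/defaultOperator used to configure. */
        public static SolrQuery build(String userInput) {
            SolrQuery query = new SolrQuery(userInput);
            query.set("df", "_text"); // replaces <defaultSearchField>_text</defaultSearchField>
            query.set("q.op", "OR");  // replaces <solrQueryParser defaultOperator="OR"/>
            return query;
        }
    }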