You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by ab...@apache.org on 2017/04/03 18:45:07 UTC

[01/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-10344: Update Solr default/example and test configs to use WordDelimiterGraphFilterFactory

Repository: lucene-solr
Updated Branches:
  refs/heads/jira/solr-9959 5be37efa0 -> 768524a41


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/solrj/src/test-files/solrj/solr/collection1/conf/schema.xml
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/collection1/conf/schema.xml b/solr/solrj/src/test-files/solrj/solr/collection1/conf/schema.xml
index 49f08b8..fb1a1ac 100644
--- a/solr/solrj/src/test-files/solrj/solr/collection1/conf/schema.xml
+++ b/solr/solrj/src/test-files/solrj/solr/collection1/conf/schema.xml
@@ -47,26 +47,41 @@
   <fieldType name="failtype1" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
               catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
   </fieldType>
 
   <!-- Demonstrating ignoreCaseChange -->
   <fieldType name="wdf_nocase" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+              catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
   </fieldType>
 
   <fieldType name="wdf_preserve" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
+              catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -166,9 +181,16 @@
     </analyzer>
   </fieldType>
   <fieldType name="lowerpunctfilt" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+              catenateNumbers="1" catenateAll="1" splitOnCaseChange="1"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="1" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -240,15 +262,16 @@
   <fieldType name="subword" class="solr.TextField" multiValued="true" positionIncrementGap="100">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.StopFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.StopFilterFactory"/>
@@ -260,15 +283,16 @@
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1"
               generateNumberParts="0" catenateWords="0" catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.StopFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1"
               generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.StopFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
@@ -279,8 +303,9 @@
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1"
               generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
@@ -293,13 +318,14 @@
   <fieldType name="skutype1" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -309,13 +335,14 @@
   <fieldType name="skutype2" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/solrj/src/test-files/solrj/solr/configsets/streaming/conf/schema.xml
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/configsets/streaming/conf/schema.xml b/solr/solrj/src/test-files/solrj/solr/configsets/streaming/conf/schema.xml
index b61a2e9..a3b6a4e 100644
--- a/solr/solrj/src/test-files/solrj/solr/configsets/streaming/conf/schema.xml
+++ b/solr/solrj/src/test-files/solrj/solr/configsets/streaming/conf/schema.xml
@@ -73,24 +73,37 @@
     <fieldtype name="failtype1" class="solr.TextField">
       <analyzer type="index">
         <tokenizer class="solr.MockTokenizerFactory"/>
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0" catenateNumbers="0" catenateAll="0"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0" catenateNumbers="0" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.FlattenGraphFilterFactory"/>
       </analyzer>
     </fieldtype>
 
     <!-- Demonstrating ignoreCaseChange -->
     <fieldtype name="wdf_nocase" class="solr.TextField">
-      <analyzer>
+      <analyzer type="index">
         <tokenizer class="solr.MockTokenizerFactory"/>
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.FlattenGraphFilterFactory"/>
+      </analyzer>
+      <analyzer type="query">
+        <tokenizer class="solr.MockTokenizerFactory"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
       </analyzer>
     </fieldtype>
 
     <fieldtype name="wdf_preserve" class="solr.TextField">
-      <analyzer>
+      <analyzer type="index">
+        <tokenizer class="solr.MockTokenizerFactory"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.FlattenGraphFilterFactory"/>
+      </analyzer>
+      <analyzer type="query">
         <tokenizer class="solr.MockTokenizerFactory"/>
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
         <filter class="solr.LowerCaseFilterFactory"/>
       </analyzer>
     </fieldtype>
@@ -179,9 +192,15 @@
       </analyzer>
     </fieldtype>
     <fieldtype name="lowerpunctfilt" class="solr.TextField">
-      <analyzer>
+      <analyzer type="index">
+        <tokenizer class="solr.MockTokenizerFactory"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="1" splitOnCaseChange="1"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.FlattenGraphFilterFactory"/>
+      </analyzer>
+      <analyzer type="query">
         <tokenizer class="solr.MockTokenizerFactory"/>
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="1" splitOnCaseChange="1"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="1" splitOnCaseChange="1"/>
         <filter class="solr.LowerCaseFilterFactory"/>
       </analyzer>
     </fieldtype>
@@ -252,14 +271,15 @@
     <fieldtype name="subword" class="solr.TextField" multiValued="true" positionIncrementGap="100">
       <analyzer type="index">
         <tokenizer class="solr.MockTokenizerFactory"/>
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.StopFilterFactory"/>
         <filter class="solr.PorterStemFilterFactory"/>
+        <filter class="solr.FlattenGraphFilterFactory"/>
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.MockTokenizerFactory"/>
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.StopFilterFactory"/>
         <filter class="solr.PorterStemFilterFactory"/>
@@ -270,14 +290,15 @@
       <analyzer type="index">
         <tokenizer class="solr.MockTokenizerFactory"/>
         <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.WordDelimiterFilterFactory" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1" generateNumberParts="0" catenateWords="0" catenateNumbers="0" catenateAll="0"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1" generateNumberParts="0" catenateWords="0" catenateNumbers="0" catenateAll="0"/>
         <filter class="solr.StopFilterFactory"/>
         <filter class="solr.PorterStemFilterFactory"/>
+        <filter class="solr.FlattenGraphFilterFactory"/>
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.MockTokenizerFactory"/>
         <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.WordDelimiterFilterFactory" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.StopFilterFactory"/>
         <filter class="solr.PorterStemFilterFactory"/>
       </analyzer>
@@ -287,7 +308,8 @@
       <analyzer type="index">
         <tokenizer class="solr.MockTokenizerFactory"/>
         <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.WordDelimiterFilterFactory"  splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory"  splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0"/>
+        <filter class="solr.FlattenGraphFilterFactory"/>
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.MockTokenizerFactory"/>
@@ -300,12 +322,13 @@
     <fieldtype name="skutype1" class="solr.TextField">
       <analyzer type="index">
         <tokenizer class="solr.MockTokenizerFactory"/>
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.FlattenGraphFilterFactory"/>
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.MockTokenizerFactory"/>
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
       </analyzer>
     </fieldtype>
@@ -314,12 +337,13 @@
     <fieldtype name="skutype2" class="solr.TextField">
       <analyzer type="index">
         <tokenizer class="solr.MockTokenizerFactory"/>
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.FlattenGraphFilterFactory"/>
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.MockTokenizerFactory"/>
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
       </analyzer>
     </fieldtype>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/solrj/src/test/org/apache/solr/client/solrj/request/SchemaTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/request/SchemaTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/request/SchemaTest.java
index 0826c27..3bbcff1 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/request/SchemaTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/request/SchemaTest.java
@@ -529,7 +529,7 @@ public class SchemaTest extends RestTestBase {
     tokenizerAttributes.put("class", "solr.WhitespaceTokenizerFactory");
     analyzerDefinition.setTokenizer(tokenizerAttributes);
     Map<String, Object> filterAttributes = new LinkedHashMap<>();
-    filterAttributes.put("class", "solr.WordDelimiterFilterFactory");
+    filterAttributes.put("class", "solr.WordDelimiterGraphFilterFactory");
     filterAttributes.put("preserveOriginal", "0");
     analyzerDefinition.setFilters(Collections.singletonList(filterAttributes));
     fieldTypeDefinition.setAnalyzer(analyzerDefinition);


[34/52] [abbrv] lucene-solr git commit: LUCENE-7763: Remove outdated comment in IndexWriterConfig.setIndexSort javadocs. (马可阳 via Christine Poerschke)

Posted by ab...@apache.org.
LUCENE-7763: Remove outdated comment in IndexWriterConfig.setIndexSort javadocs.
(马可阳 via Christine Poerschke)


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/41355565
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/41355565
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/41355565

Branch: refs/heads/jira/solr-9959
Commit: 413555651da61b764c05313b37b44339581be02d
Parents: bdb0d58
Author: Christine Poerschke <cp...@apache.org>
Authored: Fri Mar 31 18:10:27 2017 +0100
Committer: Christine Poerschke <cp...@apache.org>
Committed: Fri Mar 31 18:10:27 2017 +0100

----------------------------------------------------------------------
 lucene/CHANGES.txt                                              | 5 +++++
 .../src/java/org/apache/lucene/index/IndexWriterConfig.java     | 2 +-
 2 files changed, 6 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/41355565/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index e0827e7..833fd3c 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -106,6 +106,11 @@ Bug Fixes
 * LUCENE-7755: Fixed join queries to not reference IndexReaders, as it could
   cause leaks if they are cached. (Adrien Grand)
 
+Other
+
+* LUCENE-7763: Remove outdated comment in IndexWriterConfig.setIndexSort javadocs.
+  (马可阳 via Christine Poerschke)
+
 ======================= Lucene 6.5.0 =======================
 
 API Changes

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/41355565/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java b/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java
index 1e1e795..0fdbc3e 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java
@@ -466,7 +466,7 @@ public final class IndexWriterConfig extends LiveIndexWriterConfig {
                                                                                      SortField.Type.FLOAT);
 
   /**
-   * Set the {@link Sort} order to use when merging segments.  Note that newly flushed segments will remain unsorted.
+   * Set the {@link Sort} order to use when merging segments.
    */
   public IndexWriterConfig setIndexSort(Sort sort) {
     for(SortField sortField : sort.getSort()) {


[13/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-10365: Handle a SolrCoreInitializationException while publishing core state during SolrCore creation

Posted by ab...@apache.org.
SOLR-10365: Handle a SolrCoreInitializationException while publishing core state during SolrCore creation


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/0322068e
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/0322068e
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/0322068e

Branch: refs/heads/jira/solr-9959
Commit: 0322068ea4648c93405da5b60fcbcc3467f5b009
Parents: aa2b46a
Author: Ishan Chattopadhyaya <is...@apache.org>
Authored: Wed Mar 29 00:26:31 2017 +0530
Committer: Ishan Chattopadhyaya <is...@apache.org>
Committed: Wed Mar 29 00:26:31 2017 +0530

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  3 ++
 .../org/apache/solr/cloud/ZkController.java     |  4 +++
 .../org/apache/solr/core/CoreContainer.java     |  6 ++--
 .../core/SolrCoreInitializationException.java   | 32 ++++++++++++++++++++
 4 files changed, 41 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0322068e/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 2d180a3..8875160 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -158,6 +158,9 @@ Other Changes
 
 * SOLR-10343: Update Solr default/example and test configs to use SynonymGraphFilterFactory. (Steve Rowe)
 
+* SOLR-10365: Handle a SolrCoreInitializationException while publishing core state during SolrCore creation
+  (Ishan Chattopadhyaya)
+
 ==================  6.5.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0322068e/solr/core/src/java/org/apache/solr/cloud/ZkController.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ZkController.java b/solr/core/src/java/org/apache/solr/cloud/ZkController.java
index a3f1fd4..677bf29 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ZkController.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ZkController.java
@@ -85,6 +85,7 @@ import org.apache.solr.core.CloudConfig;
 import org.apache.solr.core.CoreContainer;
 import org.apache.solr.core.CoreDescriptor;
 import org.apache.solr.core.SolrCore;
+import org.apache.solr.core.SolrCoreInitializationException;
 import org.apache.solr.logging.MDCLoggingContext;
 import org.apache.solr.update.UpdateLog;
 import org.apache.zookeeper.CreateMode;
@@ -1232,6 +1233,9 @@ public class ZkController {
             }
           }
         }
+      } catch (SolrCoreInitializationException ex) {
+        // The core had failed to initialize (in a previous request, not this one), hence nothing to do here.
+        log.info("The core '{}' had failed to initialize before.", cd.getName());
       }
       
       ZkNodeProps m = new ZkNodeProps(props);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0322068e/solr/core/src/java/org/apache/solr/core/CoreContainer.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/CoreContainer.java b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
index 9e22f91..9db3261 100644
--- a/solr/core/src/java/org/apache/solr/core/CoreContainer.java
+++ b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
@@ -1288,7 +1288,7 @@ public class CoreContainer {
    * @see SolrCore#close()
    * @param name the core name
    * @return the core if found, null if a SolrCore by this name does not exist
-   * @exception SolrException if a SolrCore with this name failed to be initialized
+   * @exception SolrCoreInitializationException if a SolrCore with this name failed to be initialized
    */
   public SolrCore getCore(String name) {
 
@@ -1307,9 +1307,7 @@ public class CoreContainer {
       // error with the details for clients attempting to access it.
       CoreLoadFailure loadFailure = getCoreInitFailures().get(name);
       if (null != loadFailure) {
-        throw new SolrException(ErrorCode.SERVER_ERROR, "SolrCore '" + name +
-                                "' is not available due to init failure: " +
-                                loadFailure.exception.getMessage(), loadFailure.exception);
+        throw new SolrCoreInitializationException(name, loadFailure.exception);
       }
       // otherwise the user is simply asking for something that doesn't exist.
       return null;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0322068e/solr/core/src/java/org/apache/solr/core/SolrCoreInitializationException.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/SolrCoreInitializationException.java b/solr/core/src/java/org/apache/solr/core/SolrCoreInitializationException.java
new file mode 100644
index 0000000..93b653c
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/core/SolrCoreInitializationException.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.core;
+
+import org.apache.solr.common.SolrException;
+
+public class SolrCoreInitializationException extends SolrException {
+
+  public SolrCoreInitializationException(ErrorCode code, String msg) {
+    super(code, msg);
+  }
+  
+  public SolrCoreInitializationException(String coreName, Exception loadException) {
+    super(ErrorCode.SERVER_ERROR, "SolrCore '" + coreName +
+        "' is not available due to init failure: " +
+        loadException.getMessage(), loadException);
+  }
+}
\ No newline at end of file


[03/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-10344: Update Solr default/example and test configs to use WordDelimiterGraphFilterFactory

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/core/src/test-files/solr/collection1/conf/schema-single-dynamic-copy-field.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-single-dynamic-copy-field.xml b/solr/core/src/test-files/solr/collection1/conf/schema-single-dynamic-copy-field.xml
index f7cbf6d..72dc723 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema-single-dynamic-copy-field.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-single-dynamic-copy-field.xml
@@ -45,26 +45,41 @@
   <fieldType name="failtype1" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
               catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
   </fieldType>
 
   <!-- Demonstrating ignoreCaseChange -->
   <fieldType name="wdf_nocase" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+              catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
   </fieldType>
 
   <fieldType name="wdf_preserve" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
+              catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -90,11 +105,12 @@
               ignoreCase="true"
               words="stopwords.txt"
       />
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
@@ -103,7 +119,7 @@
               ignoreCase="true"
               words="stopwords.txt"
       />
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
@@ -153,11 +169,12 @@
               ignoreCase="true"
               words="stopwords.txt"
       />
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
@@ -166,7 +183,7 @@
               ignoreCase="true"
               words="stopwords.txt"
       />
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
@@ -300,15 +317,16 @@
   <fieldType name="subword" class="solr.TextField" multiValued="true" positionIncrementGap="100">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.StopFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.StopFilterFactory"/>
@@ -320,16 +338,17 @@
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" protected="protwords.txt" splitOnNumerics="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" protected="protwords.txt" splitOnNumerics="0"
               splitOnCaseChange="0" generateWordParts="1" generateNumberParts="0" catenateWords="0" catenateNumbers="0"
               catenateAll="0"/>
       <filter class="solr.StopFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" protected="protwords.txt" splitOnNumerics="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" protected="protwords.txt" splitOnNumerics="0"
               splitOnCaseChange="0" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1"
               catenateAll="0"/>
       <filter class="solr.StopFilterFactory"/>
@@ -341,9 +360,10 @@
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" protected="protwords.txt" splitOnNumerics="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" protected="protwords.txt" splitOnNumerics="0"
               splitOnCaseChange="0" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0"
               catenateAll="0"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
@@ -356,13 +376,14 @@
   <fieldType name="skutype1" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -372,13 +393,14 @@
   <fieldType name="skutype2" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -401,16 +423,17 @@
               <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
               -->
       <!--<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/core/src/test-files/solr/collection1/conf/schema-sql.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-sql.xml b/solr/core/src/test-files/solr/collection1/conf/schema-sql.xml
index 818a6c8..dbaac57 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema-sql.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-sql.xml
@@ -72,26 +72,41 @@
   <fieldtype name="failtype1" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
               catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
   </fieldtype>
 
   <!-- Demonstrating ignoreCaseChange -->
   <fieldtype name="wdf_nocase" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+              catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
   </fieldtype>
 
   <fieldtype name="wdf_preserve" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
+              catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -191,9 +206,16 @@
     </analyzer>
   </fieldtype>
   <fieldtype name="lowerpunctfilt" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+              catenateNumbers="1" catenateAll="1" splitOnCaseChange="1"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="1" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -265,15 +287,16 @@
   <fieldtype name="subword" class="solr.TextField" multiValued="true" positionIncrementGap="100">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.StopFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.StopFilterFactory"/>
@@ -285,15 +308,16 @@
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1"
               generateNumberParts="0" catenateWords="0" catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.StopFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1"
               generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.StopFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
@@ -304,8 +328,9 @@
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1"
               generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
@@ -318,13 +343,14 @@
   <fieldtype name="skutype1" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -334,13 +360,14 @@
   <fieldtype name="skutype2" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/core/src/test-files/solr/collection1/conf/schema-trie.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-trie.xml b/solr/core/src/test-files/solr/collection1/conf/schema-trie.xml
index 1df0a0f..375036f 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema-trie.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-trie.xml
@@ -151,7 +151,7 @@
     </analyzer>
   </fieldType>
 
-  <!-- A text field that uses WordDelimiterFilter to enable splitting and matching of
+  <!-- A text field that uses WordDelimiterGraphFilter to enable splitting and matching of
       words on case-change, alpha numeric boundaries, and non-alphanumeric chars,
       so that a query of "wifi" or "wi fi" could match a document containing "Wi-Fi".
       Synonyms and stopwords are customized by external files, and stemming is enabled.
@@ -165,18 +165,19 @@
       <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
       -->
       <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
       <filter class="solr.PorterStemFilterFactory"/>
       <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
@@ -189,11 +190,23 @@
   <!-- Less flexible matching, but less false matches.  Probably not ideal for product names,
        but may be good for SKUs.  Can insert dashes in the wrong place and still match. -->
   <fieldType name="textTight" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+      <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+              catenateNumbers="1" catenateAll="0"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
+      <filter class="solr.EnglishMinimalStemFilterFactory"/>
+      <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
       <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/core/src/test-files/solr/collection1/conf/schema.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema.xml b/solr/core/src/test-files/solr/collection1/conf/schema.xml
index 6f5eddc..23ac326 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema.xml
@@ -55,26 +55,41 @@
   <fieldType name="failtype1" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
               catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
   </fieldType>
 
   <!-- Demonstrating ignoreCaseChange -->
   <fieldType name="wdf_nocase" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+              catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
   </fieldType>
 
   <fieldType name="wdf_preserve" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
+              catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -196,7 +211,15 @@
     </analyzer>
   </fieldType>
   <fieldType name="lowerpunctfilt" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" expand="true"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+              catenateNumbers="1" catenateAll="1" splitOnCaseChange="1"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" expand="true"/>
       <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
@@ -274,15 +297,16 @@
   <fieldType name="subword" class="solr.TextField" multiValued="true" positionIncrementGap="100">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.StopFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.StopFilterFactory"/>
@@ -294,16 +318,17 @@
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" protected="protwords.txt" splitOnNumerics="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" protected="protwords.txt" splitOnNumerics="0"
               splitOnCaseChange="0" generateWordParts="1" generateNumberParts="0" catenateWords="0" catenateNumbers="0"
               catenateAll="0"/>
       <filter class="solr.StopFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" protected="protwords.txt" splitOnNumerics="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" protected="protwords.txt" splitOnNumerics="0"
               splitOnCaseChange="0" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1"
               catenateAll="0"/>
       <filter class="solr.StopFilterFactory"/>
@@ -315,9 +340,10 @@
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" protected="protwords.txt" splitOnNumerics="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" protected="protwords.txt" splitOnNumerics="0"
               splitOnCaseChange="0" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0"
               catenateAll="0"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
@@ -330,13 +356,14 @@
   <fieldType name="skutype1" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -346,13 +373,14 @@
   <fieldType name="skutype2" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/core/src/test-files/solr/collection1/conf/schema11.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema11.xml b/solr/core/src/test-files/solr/collection1/conf/schema11.xml
index 7591c96..db0770b 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema11.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema11.xml
@@ -147,7 +147,7 @@
       </analyzer>
     </fieldType>
 
-    <!-- A text field that uses WordDelimiterFilter to enable splitting and matching of
+    <!-- A text field that uses WordDelimiterGraphFilter to enable splitting and matching of
         words on case-change, alpha numeric boundaries, and non-alphanumeric chars,
         so that a query of "wifi" or "wi fi" could match a document containing "Wi-Fi".
         Synonyms and stopwords are customized by external files, and stemming is enabled.
@@ -161,17 +161,18 @@
         <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
         -->
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
         <filter class="solr.PorterStemFilterFactory"/>
         <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+        <filter class="solr.FlattenGraphFilterFactory"/>
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.MockTokenizerFactory"/>
         <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
         <filter class="solr.PorterStemFilterFactory"/>
@@ -183,11 +184,22 @@
     <!-- Less flexible matching, but less false matches.  Probably not ideal for product names,
          but may be good for SKUs.  Can insert dashes in the wrong place and still match. -->
     <fieldType name="textTight" class="solr.TextField" positionIncrementGap="100" >
-      <analyzer>
+      <analyzer type="index">
+        <tokenizer class="solr.MockTokenizerFactory"/>
+        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
+        <filter class="solr.EnglishMinimalStemFilterFactory"/>
+        <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+        <filter class="solr.FlattenGraphFilterFactory"/>
+      </analyzer>
+      <analyzer type="query">
         <tokenizer class="solr.MockTokenizerFactory"/>
         <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
         <filter class="solr.EnglishMinimalStemFilterFactory"/>
@@ -417,17 +429,18 @@ valued. -->
             <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
        -->
        <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
-       <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
+       <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
        <filter class="solr.LowerCaseFilterFactory"/>
        <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
        <filter class="solr.PorterStemFilterFactory"/>
        <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+       <filter class="solr.FlattenGraphFilterFactory"/>
      </analyzer>
      <analyzer type="query">
        <tokenizer class="solr.MockTokenizerFactory"/>
        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
        <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
-       <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
+       <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
        <filter class="solr.LowerCaseFilterFactory"/>
        <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
        <filter class="solr.PorterStemFilterFactory"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/core/src/test-files/solr/collection1/conf/schema12.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema12.xml b/solr/core/src/test-files/solr/collection1/conf/schema12.xml
index 8577440..db91377 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema12.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema12.xml
@@ -54,26 +54,41 @@
   <fieldType name="failtype1" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
               catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
   </fieldType>
 
   <!-- Demonstrating ignoreCaseChange -->
   <fieldType name="wdf_nocase" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+              catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
   </fieldType>
 
   <fieldType name="wdf_preserve" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
+              catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -99,11 +114,12 @@
               ignoreCase="true"
               words="stopwords.txt"
       />
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
@@ -112,7 +128,7 @@
               ignoreCase="true"
               words="stopwords.txt"
       />
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
@@ -129,11 +145,12 @@
               ignoreCase="true"
               words="stopwords.txt"
       />
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
@@ -142,7 +159,7 @@
               ignoreCase="true"
               words="stopwords.txt"
       />
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
@@ -270,15 +287,16 @@
   <fieldType name="subword" class="solr.TextField" multiValued="true" positionIncrementGap="100">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.StopFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.StopFilterFactory"/>
@@ -290,16 +308,17 @@
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" protected="protwords.txt" splitOnNumerics="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" protected="protwords.txt" splitOnNumerics="0"
               splitOnCaseChange="0" generateWordParts="1" generateNumberParts="0" catenateWords="0" catenateNumbers="0"
               catenateAll="0"/>
       <filter class="solr.StopFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" protected="protwords.txt" splitOnNumerics="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" protected="protwords.txt" splitOnNumerics="0"
               splitOnCaseChange="0" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1"
               catenateAll="0"/>
       <filter class="solr.StopFilterFactory"/>
@@ -311,9 +330,10 @@
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" protected="protwords.txt" splitOnNumerics="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" protected="protwords.txt" splitOnNumerics="0"
               splitOnCaseChange="0" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0"
               catenateAll="0"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
@@ -326,13 +346,14 @@
   <fieldType name="skutype1" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -342,13 +363,14 @@
   <fieldType name="skutype2" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -371,16 +393,17 @@
       <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
       -->
       <!--<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/core/src/test-files/solr/collection1/conf/schema15.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema15.xml b/solr/core/src/test-files/solr/collection1/conf/schema15.xml
index e2c14f0..8fb8d44 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema15.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema15.xml
@@ -46,26 +46,41 @@
   <fieldType name="failtype1" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
               catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
   </fieldType>
 
   <!-- Demonstrating ignoreCaseChange -->
   <fieldType name="wdf_nocase" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+              catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
   </fieldType>
 
   <fieldType name="wdf_preserve" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
+              catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -91,11 +106,12 @@
               ignoreCase="true"
               words="stopwords.txt"
       />
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
@@ -104,7 +120,7 @@
               ignoreCase="true"
               words="stopwords.txt"
       />
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
@@ -121,11 +137,12 @@
               ignoreCase="true"
               words="stopwords.txt"
       />
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
@@ -134,7 +151,7 @@
               ignoreCase="true"
               words="stopwords.txt"
       />
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
@@ -268,15 +285,16 @@
   <fieldType name="subword" class="solr.TextField" multiValued="true" positionIncrementGap="100">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.StopFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.StopFilterFactory"/>
@@ -288,16 +306,17 @@
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" protected="protwords.txt" splitOnNumerics="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" protected="protwords.txt" splitOnNumerics="0"
               splitOnCaseChange="0" generateWordParts="1" generateNumberParts="0" catenateWords="0" catenateNumbers="0"
               catenateAll="0"/>
       <filter class="solr.StopFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" protected="protwords.txt" splitOnNumerics="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" protected="protwords.txt" splitOnNumerics="0"
               splitOnCaseChange="0" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1"
               catenateAll="0"/>
       <filter class="solr.StopFilterFactory"/>
@@ -309,9 +328,10 @@
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" protected="protwords.txt" splitOnNumerics="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" protected="protwords.txt" splitOnNumerics="0"
               splitOnCaseChange="0" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0"
               catenateAll="0"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
@@ -324,13 +344,14 @@
   <fieldType name="skutype1" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -340,13 +361,14 @@
   <fieldType name="skutype2" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -369,16 +391,17 @@
       <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
       -->
       <!--<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/core/src/test-files/solr/collection1/conf/schema_latest.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema_latest.xml b/solr/core/src/test-files/solr/collection1/conf/schema_latest.xml
index dfeac32..1163ab5 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema_latest.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema_latest.xml
@@ -541,7 +541,7 @@
   <!-- A text field with defaults appropriate for English, plus
  aggressive word-splitting and autophrase features enabled.
  This field is just like text_en, except it adds
- WordDelimiterFilter to enable splitting and matching of
+ WordDelimiterGraphFilter to enable splitting and matching of
  words on case-change, alpha numeric boundaries, and
  non-alphanumeric chars.  This means certain compound word
  cases will work, for example query "wi fi" will match
@@ -560,11 +560,12 @@
               ignoreCase="true"
               words="stopwords.txt"
       />
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.WhitespaceTokenizerFactory"/>
@@ -573,7 +574,7 @@
               ignoreCase="true"
               words="stopwords.txt"
       />
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
@@ -585,17 +586,31 @@
        but may be good for SKUs.  Can insert dashes in the wrong place and still match. -->
   <fieldType name="text_en_splitting_tight" class="solr.TextField" positionIncrementGap="100"
              autoGeneratePhraseQueries="true">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+      <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+              catenateNumbers="1" catenateAll="0"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
+      <filter class="solr.EnglishMinimalStemFilterFactory"/>
+      <!-- this filter can remove any duplicate tokens that appear at the same position - sometimes
+           possible with WordDelimiterGraphFilter in conjuncton with stemming. -->
+      <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.WhitespaceTokenizerFactory"/>
       <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
       <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
       <filter class="solr.EnglishMinimalStemFilterFactory"/>
       <!-- this filter can remove any duplicate tokens that appear at the same position - sometimes
-           possible with WordDelimiterFilter in conjuncton with stemming. -->
+           possible with WordDelimiterGraphFilter in conjuncton with stemming. -->
       <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
     </analyzer>
   </fieldType>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/core/src/test-files/solr/collection1/conf/schemasurround.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schemasurround.xml b/solr/core/src/test-files/solr/collection1/conf/schemasurround.xml
index e22eb2b..3071968 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schemasurround.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schemasurround.xml
@@ -47,26 +47,41 @@
   <fieldType name="failtype1" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
               catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
   </fieldType>
 
   <!-- Demonstrating ignoreCaseChange -->
   <fieldType name="wdf_nocase" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+              catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
   </fieldType>
 
   <fieldType name="wdf_preserve" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
+              catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -92,11 +107,12 @@
               ignoreCase="true"
               words="stopwords.txt"
       />
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
@@ -105,7 +121,7 @@
               ignoreCase="true"
               words="stopwords.txt"
       />
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
@@ -122,11 +138,12 @@
               ignoreCase="true"
               words="stopwords.txt"
       />
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
@@ -135,7 +152,7 @@
               ignoreCase="true"
               words="stopwords.txt"
       />
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
@@ -263,15 +280,16 @@
   <fieldType name="subword" class="solr.TextField" multiValued="true" positionIncrementGap="100">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.StopFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.StopFilterFactory"/>
@@ -283,16 +301,17 @@
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" protected="protwords.txt" splitOnNumerics="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" protected="protwords.txt" splitOnNumerics="0"
               splitOnCaseChange="0" generateWordParts="1" generateNumberParts="0" catenateWords="0" catenateNumbers="0"
               catenateAll="0"/>
       <filter class="solr.StopFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" protected="protwords.txt" splitOnNumerics="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" protected="protwords.txt" splitOnNumerics="0"
               splitOnCaseChange="0" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1"
               catenateAll="0"/>
       <filter class="solr.StopFilterFactory"/>
@@ -304,9 +323,10 @@
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" protected="protwords.txt" splitOnNumerics="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" protected="protwords.txt" splitOnNumerics="0"
               splitOnCaseChange="0" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0"
               catenateAll="0"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
@@ -319,13 +339,14 @@
   <fieldType name="skutype1" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -335,13 +356,14 @@
   <fieldType name="skutype2" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -364,16 +386,17 @@
       <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
       -->
       <!--<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
@@ -384,15 +407,16 @@
   <fieldType name="textgen" class="solr.TextField" positionIncrementGap="100">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.EnglishMinimalStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.EnglishMinimalStemFilterFactory"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/core/src/test-files/solr/collection1/conf/synonyms.txt
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/synonyms.txt b/solr/core/src/test-files/solr/collection1/conf/synonyms.txt
index 340abd7..f64cb72 100644
--- a/solr/core/src/test-files/solr/collection1/conf/synonyms.txt
+++ b/solr/core/src/test-files/solr/collection1/conf/synonyms.txt
@@ -23,7 +23,7 @@ fooaaa,baraaa,bazaaa
 GB,gib,gigabyte,gigabytes
 MB,mib,megabyte,megabytes
 Television, Televisions, TV, TVs
-#notice we use "gib" instead of "GiB" so any WordDelimiterFilter coming
+#notice we use "gib" instead of "GiB" so any WordDelimiterGraphFilter coming
 #after us won't split it into two words.
 
 # Synonym mappings can be used for spelling correction too

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/core/src/test-files/solr/collection1/conf/wdftypes.txt
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/wdftypes.txt b/solr/core/src/test-files/solr/collection1/conf/wdftypes.txt
index 7378b08..9a453c8 100644
--- a/solr/core/src/test-files/solr/collection1/conf/wdftypes.txt
+++ b/solr/core/src/test-files/solr/collection1/conf/wdftypes.txt
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-# A customized type mapping for WordDelimiterFilterFactory
+# A customized type mapping for WordDelimiterGraphFilterFactory
 # the allowable types are: LOWER, UPPER, ALPHA, DIGIT, ALPHANUM, SUBWORD_DELIM
 # 
 # the default for any character without a mapping is always computed from 


[27/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-10351: Fix pre-commit

Posted by ab...@apache.org.
SOLR-10351: Fix pre-commit


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/bdd0c7e3
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/bdd0c7e3
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/bdd0c7e3

Branch: refs/heads/jira/solr-9959
Commit: bdd0c7e32087f534de04657fb3ef1b3afa93cc68
Parents: 6c2155c
Author: Joel Bernstein <jb...@apache.org>
Authored: Thu Mar 30 17:52:16 2017 +0100
Committer: Joel Bernstein <jb...@apache.org>
Committed: Thu Mar 30 17:53:07 2017 +0100

----------------------------------------------------------------------
 .../apache/solr/client/solrj/io/stream/StreamExpressionTest.java    | 1 -
 1 file changed, 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bdd0c7e3/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
index 18ddb93..f153a1b 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
@@ -61,7 +61,6 @@ import org.apache.solr.cloud.AbstractDistribZkTestBase;
 import org.apache.solr.cloud.SolrCloudTestCase;
 import org.apache.solr.common.params.CommonParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.handler.AnalyzeEvaluator;
 import org.junit.Assume;
 import org.junit.Before;
 import org.junit.BeforeClass;


[09/52] [abbrv] lucene-solr:jira/solr-9959: LUCENE-7754: Inner classes should be static whenever possible.

Posted by ab...@apache.org.
LUCENE-7754: Inner classes should be static whenever possible.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/53064e46
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/53064e46
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/53064e46

Branch: refs/heads/jira/solr-9959
Commit: 53064e46ddfc94a0b0e1d9c9f3e94786fb6701cd
Parents: 68eb078
Author: Adrien Grand <jp...@gmail.com>
Authored: Tue Mar 28 15:15:45 2017 +0200
Committer: Adrien Grand <jp...@gmail.com>
Committed: Tue Mar 28 15:35:04 2017 +0200

----------------------------------------------------------------------
 lucene/CHANGES.txt                                |  6 +++++-
 .../analysis/charfilter/HTMLStripCharFilter.java  |  2 +-
 .../analysis/charfilter/HTMLStripCharFilter.jflex |  7 +++----
 .../lucene/analysis/shingle/ShingleFilter.java    |  2 +-
 .../lucene/analysis/hunspell/TestDictionary.java  |  2 +-
 .../miscellaneous/TestWordDelimiterFilter.java    |  2 +-
 .../TestWordDelimiterGraphFilter.java             |  2 +-
 .../payloads/NumericPayloadTokenFilterTest.java   |  2 +-
 .../payloads/TypeAsPayloadTokenFilterTest.java    |  2 +-
 .../analysis/sinks/TestTeeSinkTokenFilter.java    |  4 ++--
 .../lucene/analysis/snowball/TestSnowball.java    |  2 +-
 .../standard/TestUAX29URLEmailTokenizer.java      |  4 ++--
 .../lucene/analysis/icu/ICUTransformFilter.java   |  2 +-
 .../CachingNaiveBayesClassifierTest.java          |  2 +-
 .../SimpleNaiveBayesClassifierTest.java           |  2 +-
 .../bloom/BloomFilteringPostingsFormat.java       |  8 ++++----
 .../codecs/memory/MemoryPostingsFormat.java       |  2 +-
 .../simpletext/SimpleTextTermVectorsReader.java   |  2 +-
 .../compressing/CompressingTermVectorsReader.java |  2 +-
 .../apache/lucene/index/ParallelLeafReader.java   |  2 +-
 .../apache/lucene/search/ConstantScoreQuery.java  |  2 +-
 .../test/org/apache/lucene/index/TestCodecs.java  |  8 ++++----
 .../index/TestFlushByRamOrCountsPolicy.java       |  2 +-
 .../lucene/index/TestIndexWriterExceptions.java   |  2 +-
 .../lucene/index/TestIndexWriterMerging.java      |  2 +-
 .../lucene/index/TestIndexWriterWithThreads.java  |  2 +-
 .../apache/lucene/index/TestIndexableField.java   |  2 +-
 .../apache/lucene/index/TestMaxTermFrequency.java |  2 +-
 .../test/org/apache/lucene/index/TestNorms.java   |  2 +-
 .../lucene/index/TestPerSegmentDeletes.java       |  2 +-
 .../apache/lucene/index/TestStressIndexing2.java  |  2 +-
 .../lucene/index/TestTermVectorsReader.java       |  2 +-
 .../lucene/index/TestTransactionRollback.java     |  6 +++---
 .../apache/lucene/index/TestUniqueTermCount.java  |  2 +-
 .../lucene/search/TestCustomSearcherSort.java     |  4 ++--
 .../TestEarlyTerminatingSortingCollector.java     |  4 ++--
 .../apache/lucene/search/TestPrefixRandom.java    |  4 ++--
 .../apache/lucene/search/TestRegexpRandom2.java   |  2 +-
 .../lucene/search/TestSimilarityProvider.java     |  6 +++---
 .../org/apache/lucene/search/TestTermScorer.java  |  2 +-
 .../lucene/search/TestTimeLimitingCollector.java  |  2 +-
 .../org/apache/lucene/store/TestLockFactory.java  |  4 ++--
 .../lucene/util/TestCloseableThreadLocal.java     |  2 +-
 .../org/apache/lucene/util/TestQueryBuilder.java  |  2 +-
 .../test/org/apache/lucene/util/fst/TestFSTs.java |  2 +-
 .../facet/taxonomy/writercache/CollisionMap.java  |  2 +-
 .../search/grouping/GroupFacetCollectorTest.java  |  4 ++--
 .../TestUnifiedHighlighterExtensibility.java      |  2 +-
 .../apache/lucene/queries/CustomScoreQuery.java   |  2 +-
 .../lucene/queries/TestCustomScoreQuery.java      |  2 +-
 .../lucene/queries/payloads/TestPayloadSpans.java |  6 +++---
 .../queryparser/classic/TestMultiAnalyzer.java    |  8 ++++----
 .../classic/TestMultiFieldQueryParser.java        |  2 +-
 .../standard/TestMultiAnalyzerQPHelper.java       |  8 ++++----
 .../flexible/standard/TestQPHelper.java           |  8 ++++----
 .../queryparser/util/QueryParserTestBase.java     | 10 +++++-----
 .../sandbox/queries/FuzzyLikeThisQuery.java       |  2 +-
 .../lucene/payloads/TestPayloadSpanUtil.java      |  4 ++--
 .../search/spell/WordBreakSpellChecker.java       | 10 +++++-----
 .../search/suggest/document/ContextQuery.java     |  2 +-
 .../search/suggest/fst/ExternalRefSorter.java     |  2 +-
 .../search/suggest/fst/WFSTCompletionLookup.java  |  2 +-
 .../search/suggest/jaspell/JaspellLookup.java     |  8 ++++----
 .../suggest/jaspell/JaspellTernarySearchTrie.java |  2 +-
 .../search/suggest/DocumentDictionaryTest.java    |  2 +-
 .../analyzing/AnalyzingInfixSuggesterTest.java    |  2 +-
 .../suggest/analyzing/FuzzySuggesterTest.java     |  2 +-
 .../lucene/store/BaseLockFactoryTestCase.java     |  2 +-
 .../dependencies/GetMavenDependenciesTask.java    |  2 +-
 .../lucene/validation/LibVersionsCheckTask.java   |  2 +-
 .../handler/dataimport/MailEntityProcessor.java   |  4 ++--
 .../solr/handler/dataimport/VariableResolver.java |  2 +-
 .../dataimport/AbstractDIHCacheTestCase.java      |  2 +-
 .../AbstractSqlEntityProcessorTestCase.java       |  4 ++--
 .../dataimport/TestContentStreamDataSource.java   |  2 +-
 .../handler/extraction/XLSXResponseWriter.java    |  2 +-
 .../java/org/apache/solr/ltr/LTRScoringQuery.java |  2 +-
 .../apache/solr/ltr/TestLTRReRankingPipeline.java |  2 +-
 .../solr/response/VelocityResponseWriter.java     |  2 +-
 .../src/java/org/apache/solr/cloud/Overseer.java  |  2 +-
 .../org/apache/solr/cloud/OverseerTaskQueue.java  |  2 +-
 .../java/org/apache/solr/cloud/ZkController.java  |  2 +-
 .../apache/solr/core/CachingDirectoryFactory.java |  2 +-
 .../org/apache/solr/handler/CdcrReplicator.java   |  2 +-
 .../apache/solr/handler/CdcrReplicatorState.java  |  6 +++---
 .../org/apache/solr/handler/ExportWriter.java     | 18 +++++++++---------
 .../org/apache/solr/handler/IndexFetcher.java     |  4 ++--
 .../java/org/apache/solr/handler/SQLHandler.java  |  2 +-
 .../solr/handler/component/ExpandComponent.java   |  6 +++---
 .../component/HttpShardHandlerFactory.java        |  2 +-
 .../component/PivotFacetFieldValueCollection.java |  4 ++--
 .../solr/handler/component/TermsComponent.java    |  2 +-
 .../solr/highlight/DefaultSolrHighlighter.java    |  2 +-
 .../apache/solr/schema/ManagedIndexSchema.java    |  4 ++--
 .../org/apache/solr/schema/RandomSortField.java   |  2 +-
 .../solr/search/ComplexPhraseQParserPlugin.java   |  4 ++--
 .../apache/solr/search/ExportQParserPlugin.java   |  2 +-
 .../apache/solr/search/ExtendedDismaxQParser.java |  4 ++--
 .../org/apache/solr/search/HashQParserPlugin.java | 18 +++++++++---------
 .../org/apache/solr/search/ReRankCollector.java   |  2 +-
 .../apache/solr/search/ReRankQParserPlugin.java   |  2 +-
 .../solr/search/facet/FacetFieldMerger.java       |  2 +-
 .../org/apache/solr/search/facet/UniqueAgg.java   |  2 +-
 .../search/function/CollapseScoreFunction.java    |  2 +-
 .../org/apache/solr/search/join/GraphQuery.java   |  2 +-
 .../apache/solr/spelling/PossibilityIterator.java |  6 +++---
 .../java/org/apache/solr/update/UpdateLog.java    |  2 +-
 .../src/java/org/apache/solr/util/RTimer.java     |  2 +-
 .../java/org/apache/solr/util/SimplePostTool.java |  4 ++--
 .../solr/cloud/BaseCdcrDistributedZkTest.java     |  4 ++--
 .../solr/cloud/ChaosMonkeyNothingIsSafeTest.java  |  4 ++--
 .../apache/solr/cloud/ConnectionManagerTest.java  |  2 +-
 .../apache/solr/cloud/DistributedQueueTest.java   |  2 +-
 .../org/apache/solr/cloud/LeaderElectionTest.java |  4 ++--
 ...verriddenZkACLAndCredentialsProvidersTest.java |  4 ++--
 .../OverseerCollectionConfigSetProcessorTest.java |  2 +-
 .../org/apache/solr/cloud/ZkSolrClientTest.java   |  2 +-
 .../solr/core/CachingDirectoryFactoryTest.java    |  2 +-
 .../handler/AnalysisRequestHandlerTestBase.java   |  2 +-
 .../solr/handler/PingRequestHandlerTest.java      |  2 +-
 .../solr/handler/XmlUpdateRequestHandlerTest.java |  2 +-
 .../admin/CoreMergeIndexesAdminHandlerTest.java   |  2 +-
 .../apache/solr/internal/csv/CSVParserTest.java   |  2 +-
 .../reporters/SolrGraphiteReporterTest.java       |  2 +-
 .../solr/response/TestGraphMLResponseWriter.java  |  2 +-
 .../org/apache/solr/rest/TestManagedResource.java |  4 ++--
 .../org/apache/solr/rest/TestRestManager.java     |  2 +-
 .../schema/TestCloudManagedSchemaConcurrent.java  |  2 +-
 .../solr/search/AnalyticsTestQParserPlugin.java   | 10 +++++-----
 .../solr/search/TestExtendedDismaxParser.java     | 10 +++++-----
 .../org/apache/solr/search/TestFiltering.java     |  2 +-
 .../apache/solr/search/TestRankQueryPlugin.java   | 14 +++++++-------
 .../spelling/ConjunctionSolrSpellCheckerTest.java |  4 ++--
 .../solr/spelling/IndexBasedSpellCheckerTest.java |  2 +-
 .../solr/store/blockcache/BlockDirectoryTest.java |  2 +-
 .../org/apache/solr/util/SimplePostToolTest.java  |  2 +-
 .../org/apache/solr/util/SolrPluginUtilsTest.java |  2 +-
 .../apache/solr/util/TestSolrCLIRunExample.java   |  2 +-
 .../solrj/impl/ConcurrentUpdateSolrClient.java    |  2 +-
 .../apache/solr/client/solrj/io/ModelCache.java   |  4 ++--
 .../client/solrj/io/graph/GatherNodesStream.java  |  2 +-
 .../client/solrj/io/graph/ShortestPathStream.java |  2 +-
 .../solr/client/solrj/io/ops/GroupOperation.java  |  2 +-
 .../solr/client/solrj/io/stream/RankStream.java   |  2 +-
 .../solrj/io/stream/SignificantTermsStream.java   |  2 +-
 .../client/solrj/io/stream/TextLogitStream.java   |  2 +-
 .../client/solrj/response/SpellCheckResponse.java |  4 ++--
 .../apache/solr/common/cloud/ZkStateReader.java   |  2 +-
 .../solr/client/solrj/TestLBHttpSolrClient.java   |  2 +-
 .../solrj/embedded/SolrExampleStreamingTest.java  |  4 ++--
 .../impl/ConcurrentUpdateSolrClientTest.java      |  6 +++---
 .../src/java/org/apache/solr/SolrTestCaseJ4.java  |  2 +-
 152 files changed, 253 insertions(+), 250 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 15f4ff3..e6da586 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -81,7 +81,11 @@ Other
 * LUCENE-7681: MemoryIndex uses new DocValues API (Alan Woodward)
 
 ======================= Lucene 6.6.0 =======================
-(No Changes)
+
+Other
+
+* LUCENE-7754: Inner classes should be static whenever possible.
+  (Daniel Jelinski via Adrien Grand)
 
 ======================= Lucene 6.5.0 =======================
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java
index ba44dd8..a236497 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java
@@ -30021,7 +30021,7 @@ public final class HTMLStripCharFilter extends BaseCharFilter {
     return ZZ_BUFFERSIZE;
   }
 
-  private class TextSegment extends OpenStringBuilder {
+  private static class TextSegment extends OpenStringBuilder {
     /** The position from which the next char will be read. */
     int pos = 0;
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.jflex
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.jflex b/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.jflex
index 98c3946..d810d79 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.jflex
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.jflex
@@ -19,13 +19,12 @@ package org.apache.lucene.analysis.charfilter;
 
 import java.io.IOException;
 import java.io.Reader;
-import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.lucene.analysis.util.CharArrayMap;
-import org.apache.lucene.analysis.util.CharArraySet;
+import org.apache.lucene.analysis.CharArrayMap;
+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.util.OpenStringBuilder;
 
 /**
@@ -238,7 +237,7 @@ InlineElment = ( [aAbBiIqQsSuU]                   |
     return ZZ_BUFFERSIZE;
   }
 
-  private class TextSegment extends OpenStringBuilder {
+  private static class TextSegment extends OpenStringBuilder {
     /** The position from which the next char will be read. */
     int pos = 0;
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilter.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilter.java
index e3fa803..0775cf7 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilter.java
@@ -583,7 +583,7 @@ public final class ShingleFilter extends TokenFilter {
     }
   }
     
-  private class InputWindowToken {
+  private static class InputWindowToken {
     final AttributeSource attSource;
     final CharTermAttribute termAtt;
     final OffsetAttribute offsetAtt;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDictionary.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDictionary.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDictionary.java
index 5e32afe..b7312cb 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDictionary.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestDictionary.java
@@ -157,7 +157,7 @@ public class TestDictionary extends LuceneTestCase {
     tempDir.close();
   }
   
-  private class CloseCheckInputStream extends FilterInputStream {
+  private static class CloseCheckInputStream extends FilterInputStream {
     private boolean closed = false;
 
     public CloseCheckInputStream(InputStream delegate) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java
index 7f0481f..2804bfd 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java
@@ -184,7 +184,7 @@ public class TestWordDelimiterFilter extends BaseTokenStreamTestCase {
   /*
    * Set a large position increment gap of 10 if the token is "largegap" or "/"
    */
-  private final class LargePosIncTokenFilter extends TokenFilter {
+  private static final class LargePosIncTokenFilter extends TokenFilter {
     private CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
     private PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
     

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterGraphFilter.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterGraphFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterGraphFilter.java
index f4e8b79..7516a23 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterGraphFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterGraphFilter.java
@@ -171,7 +171,7 @@ public class TestWordDelimiterGraphFilter extends BaseTokenStreamTestCase {
   /*
    * Set a large position increment gap of 10 if the token is "largegap" or "/"
    */
-  private final class LargePosIncTokenFilter extends TokenFilter {
+  private static final class LargePosIncTokenFilter extends TokenFilter {
     private CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
     private PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
     

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilterTest.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilterTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilterTest.java
index 3e54cc0..81da458 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilterTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilterTest.java
@@ -57,7 +57,7 @@ public class NumericPayloadTokenFilterTest extends BaseTokenStreamTestCase {
     assertTrue(seenDogs + " does not equal: " + true, seenDogs == true);
   }
 
-  private final class WordTokenFilter extends TokenFilter {
+  private static final class WordTokenFilter extends TokenFilter {
     private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
     private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
     

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilterTest.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilterTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilterTest.java
index f901016..5333cf0 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilterTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilterTest.java
@@ -47,7 +47,7 @@ public class TypeAsPayloadTokenFilterTest extends BaseTokenStreamTestCase {
     assertTrue(count + " does not equal: " + 10, count == 10);
   }
 
-  private final class WordTokenFilter extends TokenFilter {
+  private static final class WordTokenFilter extends TokenFilter {
     private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
     private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
     

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java
index c0127a3..5c2c2bb 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java
@@ -223,7 +223,7 @@ public class TestTeeSinkTokenFilter extends BaseTokenStreamTestCase {
   }
 
 
-  class ModuloTokenFilter extends TokenFilter {
+  static class ModuloTokenFilter extends TokenFilter {
 
     int modCount;
 
@@ -248,7 +248,7 @@ public class TestTeeSinkTokenFilter extends BaseTokenStreamTestCase {
     }
   }
 
-  class ModuloSinkFilter extends FilteringTokenFilter {
+  static class ModuloSinkFilter extends FilteringTokenFilter {
     int count = 0;
     int modCount;
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/analysis/common/src/test/org/apache/lucene/analysis/snowball/TestSnowball.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/snowball/TestSnowball.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/snowball/TestSnowball.java
index 881bc2f..783b707 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/snowball/TestSnowball.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/snowball/TestSnowball.java
@@ -71,7 +71,7 @@ public class TestSnowball extends BaseTokenStreamTestCase {
     assertEquals(new BytesRef(new byte[]{0,1,2,3}), payloadAtt.getPayload());
   }
   
-  private final class TestTokenStream extends TokenStream {
+  private static final class TestTokenStream extends TokenStream {
     private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
     private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
     private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestUAX29URLEmailTokenizer.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestUAX29URLEmailTokenizer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestUAX29URLEmailTokenizer.java
index 8d3c706..eaa5a44 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestUAX29URLEmailTokenizer.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestUAX29URLEmailTokenizer.java
@@ -127,7 +127,7 @@ public class TestUAX29URLEmailTokenizer extends BaseTokenStreamTestCase {
   }
 
   /** Passes through tokens with type "<URL>" and blocks all other types. */
-  private class URLFilter extends TokenFilter {
+  private static class URLFilter extends TokenFilter {
     private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
     public URLFilter(TokenStream in) {
       super(in);
@@ -146,7 +146,7 @@ public class TestUAX29URLEmailTokenizer extends BaseTokenStreamTestCase {
   }
   
   /** Passes through tokens with type "<EMAIL>" and blocks all other types. */
-  private class EmailFilter extends TokenFilter {
+  private static class EmailFilter extends TokenFilter {
     private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
     public EmailFilter(TokenStream in) {
       super(in);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUTransformFilter.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUTransformFilter.java b/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUTransformFilter.java
index e48c4ba..bfe7c12 100644
--- a/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUTransformFilter.java
+++ b/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUTransformFilter.java
@@ -112,7 +112,7 @@ public final class ICUTransformFilter extends TokenFilter {
   /**
    * Wrap a {@link CharTermAttribute} with the Replaceable API.
    */
-  final class ReplaceableTermAttribute implements Replaceable {
+  static final class ReplaceableTermAttribute implements Replaceable {
     private char buffer[];
     private int length;
     private CharTermAttribute token;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/classification/src/test/org/apache/lucene/classification/CachingNaiveBayesClassifierTest.java
----------------------------------------------------------------------
diff --git a/lucene/classification/src/test/org/apache/lucene/classification/CachingNaiveBayesClassifierTest.java b/lucene/classification/src/test/org/apache/lucene/classification/CachingNaiveBayesClassifierTest.java
index acbfe82..ccdb207 100644
--- a/lucene/classification/src/test/org/apache/lucene/classification/CachingNaiveBayesClassifierTest.java
+++ b/lucene/classification/src/test/org/apache/lucene/classification/CachingNaiveBayesClassifierTest.java
@@ -82,7 +82,7 @@ public class CachingNaiveBayesClassifierTest extends ClassificationTestBase<Byte
     }
   }
 
-  private class NGramAnalyzer extends Analyzer {
+  private static class NGramAnalyzer extends Analyzer {
     @Override
     protected TokenStreamComponents createComponents(String fieldName) {
       final Tokenizer tokenizer = new KeywordTokenizer();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/classification/src/test/org/apache/lucene/classification/SimpleNaiveBayesClassifierTest.java
----------------------------------------------------------------------
diff --git a/lucene/classification/src/test/org/apache/lucene/classification/SimpleNaiveBayesClassifierTest.java b/lucene/classification/src/test/org/apache/lucene/classification/SimpleNaiveBayesClassifierTest.java
index 830ce2c..2b4873d 100644
--- a/lucene/classification/src/test/org/apache/lucene/classification/SimpleNaiveBayesClassifierTest.java
+++ b/lucene/classification/src/test/org/apache/lucene/classification/SimpleNaiveBayesClassifierTest.java
@@ -83,7 +83,7 @@ public class SimpleNaiveBayesClassifierTest extends ClassificationTestBase<Bytes
     }
   }
 
-  private class NGramAnalyzer extends Analyzer {
+  private static class NGramAnalyzer extends Analyzer {
     @Override
     protected TokenStreamComponents createComponents(String fieldName) {
       final Tokenizer tokenizer = new KeywordTokenizer();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java
----------------------------------------------------------------------
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java
index ffe9fa1..b864bf0 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java
@@ -217,8 +217,8 @@ public final class BloomFilteringPostingsFormat extends PostingsFormat {
     public int size() {
       return delegateFieldsProducer.size();
     }
-    
-    class BloomFilteredTerms extends Terms {
+
+    static class BloomFilteredTerms extends Terms {
       private Terms delegateTerms;
       private FuzzySet filter;
       
@@ -288,8 +288,8 @@ public final class BloomFilteringPostingsFormat extends PostingsFormat {
         return delegateTerms.getMax();
       }
     }
-    
-    final class BloomFilteredTermsEnum extends TermsEnum {
+
+    static final class BloomFilteredTermsEnum extends TermsEnum {
       private Terms delegateTerms;
       private TermsEnum delegateTermsEnum;
       private final FuzzySet filter;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
----------------------------------------------------------------------
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
index 2f71765..dff445e 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
@@ -267,7 +267,7 @@ public final class MemoryPostingsFormat extends PostingsFormat {
   private static final int VERSION_START = 1;
   private static final int VERSION_CURRENT = VERSION_START;
 
-  private class MemoryFieldsConsumer extends FieldsConsumer {
+  private static class MemoryFieldsConsumer extends FieldsConsumer {
     private final SegmentWriteState state;
     private final IndexOutput out;
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java
----------------------------------------------------------------------
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java
index c1808bf..873aaef 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java
@@ -239,7 +239,7 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
     return scratchUTF16.toString();
   }
 
-  private class SimpleTVFields extends Fields {
+  private static class SimpleTVFields extends Fields {
     private final SortedMap<String,SimpleTVTerms> fields;
 
     SimpleTVFields(SortedMap<String,SimpleTVTerms> fields) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java
index aa19f20..2e8ed63 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java
@@ -742,7 +742,7 @@ public final class CompressingTermVectorsReader extends TermVectorsReader implem
 
   }
 
-  private class TVTerms extends Terms {
+  private static class TVTerms extends Terms {
 
     private final int numTerms, flags;
     private final int[] prefixLengths, suffixLengths, termFreqs, positionIndex, positions, startOffsets, lengths, payloadIndex;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/core/src/java/org/apache/lucene/index/ParallelLeafReader.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/ParallelLeafReader.java b/lucene/core/src/java/org/apache/lucene/index/ParallelLeafReader.java
index c67d07b..eee0c16 100644
--- a/lucene/core/src/java/org/apache/lucene/index/ParallelLeafReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/ParallelLeafReader.java
@@ -160,7 +160,7 @@ public class ParallelLeafReader extends LeafReader {
   }
 
   // Single instance of this, per ParallelReader instance
-  private final class ParallelFields extends Fields {
+  private static final class ParallelFields extends Fields {
     final Map<String,Terms> fields = new TreeMap<>();
     
     ParallelFields() {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java b/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java
index dbd05e8..8827a9f 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java
@@ -68,7 +68,7 @@ public final class ConstantScoreQuery extends Query {
    *  wraps a query with its own optimized top-level
    *  scorer (e.g. BooleanScorer) we can use that
    *  top-level scorer. */
-  protected class ConstantBulkScorer extends BulkScorer {
+  protected static class ConstantBulkScorer extends BulkScorer {
     final BulkScorer bulkScorer;
     final Weight weight;
     final float theScore;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java b/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
index 79783b5..c842606 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
@@ -75,7 +75,7 @@ public class TestCodecs extends LuceneTestCase {
     NUM_TEST_ITER = atLeast(20);
   }
 
-  class FieldData implements Comparable<FieldData> {
+  static class FieldData implements Comparable<FieldData> {
     final FieldInfo fieldInfo;
     final TermData[] terms;
     final boolean omitTF;
@@ -107,7 +107,7 @@ public class TestCodecs extends LuceneTestCase {
     }
   }
 
-  class PositionData {
+  static class PositionData {
     int pos;
     BytesRef payload;
 
@@ -117,7 +117,7 @@ public class TestCodecs extends LuceneTestCase {
     }
   }
 
-  class TermData implements Comparable<TermData> {
+  static class TermData implements Comparable<TermData> {
     String text2;
     final BytesRef text;
     int[] docs;
@@ -300,7 +300,7 @@ public class TestCodecs extends LuceneTestCase {
     dir.close();
   }
 
-  private class Verify extends Thread {
+  private static class Verify extends Thread {
     final Fields termsDict;
     final FieldData[] fields;
     final SegmentInfo si;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/core/src/test/org/apache/lucene/index/TestFlushByRamOrCountsPolicy.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestFlushByRamOrCountsPolicy.java b/lucene/core/src/test/org/apache/lucene/index/TestFlushByRamOrCountsPolicy.java
index 993a521..aa2901c 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestFlushByRamOrCountsPolicy.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestFlushByRamOrCountsPolicy.java
@@ -290,7 +290,7 @@ public class TestFlushByRamOrCountsPolicy extends LuceneTestCase {
     assertEquals(bytesUsed, flushControl.activeBytes());
   }
 
-  public class IndexThread extends Thread {
+  public static class IndexThread extends Thread {
     IndexWriter writer;
     LiveIndexWriterConfig iwc;
     LineFileDocs docs;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
index 987852f..c0907a5 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
@@ -346,7 +346,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
 
   private static String CRASH_FAIL_MESSAGE = "I'm experiencing problems";
 
-  private class CrashingFilter extends TokenFilter {
+  private static class CrashingFilter extends TokenFilter {
     String fieldName;
     int count;
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMerging.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMerging.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMerging.java
index e4d4018..6931efa 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMerging.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMerging.java
@@ -308,7 +308,7 @@ public class TestIndexWriterMerging extends LuceneTestCase {
   
   // Just intercepts all merges & verifies that we are never
   // merging a segment with >= 20 (maxMergeDocs) docs
-  private class MyMergeScheduler extends MergeScheduler {
+  private static class MyMergeScheduler extends MergeScheduler {
     @Override
     synchronized public void merge(IndexWriter writer, MergeTrigger trigger, boolean newMergesFound) throws IOException {
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java
index 6b599d2..6c950b8 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java
@@ -51,7 +51,7 @@ import org.apache.lucene.util.LuceneTestCase.Slow;
 public class TestIndexWriterWithThreads extends LuceneTestCase {
 
   // Used by test cases below
-  private class IndexerThread extends Thread {
+  private static class IndexerThread extends Thread {
 
     boolean diskFull;
     Throwable error;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java
index e60a3e1..510a899 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java
@@ -41,7 +41,7 @@ import org.apache.lucene.util.TestUtil;
 
 public class TestIndexableField extends LuceneTestCase {
 
-  private class MyField implements IndexableField {
+  private static class MyField implements IndexableField {
 
     private final int counter;
     private final IndexableFieldType fieldType = new IndexableFieldType() {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/core/src/test/org/apache/lucene/index/TestMaxTermFrequency.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestMaxTermFrequency.java b/lucene/core/src/test/org/apache/lucene/index/TestMaxTermFrequency.java
index 900ec2b..4f74c30 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestMaxTermFrequency.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestMaxTermFrequency.java
@@ -99,7 +99,7 @@ public class TestMaxTermFrequency extends LuceneTestCase {
   /**
    * Simple similarity that encodes maxTermFrequency directly as a byte
    */
-  class TestSimilarity extends TFIDFSimilarity {
+  static class TestSimilarity extends TFIDFSimilarity {
 
     @Override
     public float lengthNorm(FieldInvertState state) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/core/src/test/org/apache/lucene/index/TestNorms.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestNorms.java b/lucene/core/src/test/org/apache/lucene/index/TestNorms.java
index 363f57e..16ce61d 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestNorms.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestNorms.java
@@ -50,7 +50,7 @@ import org.apache.lucene.util.TestUtil;
 public class TestNorms extends LuceneTestCase {
   final String byteTestField = "normsTestByte";
 
-  class CustomNormEncodingSimilarity extends TFIDFSimilarity {
+  static class CustomNormEncodingSimilarity extends TFIDFSimilarity {
 
     @Override
     public long encodeNormValue(float f) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java b/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
index 34d8afb..58ef890 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
@@ -242,7 +242,7 @@ public class TestPerSegmentDeletes extends LuceneTestCase {
     return Arrays.copyOf(docs, numDocs);
   }
 
-  public class RangeMergePolicy extends MergePolicy {
+  public static class RangeMergePolicy extends MergePolicy {
     boolean doMerge = false;
     int start;
     int length;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java b/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java
index 3cd9a6a..d386f39 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java
@@ -686,7 +686,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
     assertFalse(fieldsEnum2.hasNext());
   }
 
-  private class IndexingThread extends Thread {
+  private static class IndexingThread extends Thread {
     IndexWriter w;
     int base;
     int range;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java b/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java
index 4b2a060..76947dd 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java
@@ -50,7 +50,7 @@ public class TestTermVectorsReader extends LuceneTestCase {
   private FieldInfos fieldInfos = new FieldInfos(new FieldInfo[0]);
   private static int TERM_FREQ = 3;
 
-  private class TestToken implements Comparable<TestToken> {
+  private static class TestToken implements Comparable<TestToken> {
     String text;
     int pos;
     int startOffset;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/core/src/test/org/apache/lucene/index/TestTransactionRollback.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTransactionRollback.java b/lucene/core/src/test/org/apache/lucene/index/TestTransactionRollback.java
index e7de028..eb08475 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTransactionRollback.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTransactionRollback.java
@@ -157,7 +157,7 @@ public class TestTransactionRollback extends LuceneTestCase {
   }
 
   // Rolls back to previous commit point
-  class RollbackDeletionPolicy extends IndexDeletionPolicy {
+  static class RollbackDeletionPolicy extends IndexDeletionPolicy {
     private int rollbackPoint;
 
     public RollbackDeletionPolicy(int rollbackPoint) {
@@ -197,7 +197,7 @@ public class TestTransactionRollback extends LuceneTestCase {
     }
   }
 
-  class DeleteLastCommitPolicy extends IndexDeletionPolicy {
+  static class DeleteLastCommitPolicy extends IndexDeletionPolicy {
 
     @Override
     public void onCommit(List<? extends IndexCommit> commits) throws IOException {}
@@ -222,7 +222,7 @@ public class TestTransactionRollback extends LuceneTestCase {
   }
 
   // Keeps all commit points (used to build index)
-  class KeepAllDeletionPolicy extends IndexDeletionPolicy {
+  static class KeepAllDeletionPolicy extends IndexDeletionPolicy {
     @Override
     public void onCommit(List<? extends IndexCommit> commits) throws IOException {}
     @Override

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/core/src/test/org/apache/lucene/index/TestUniqueTermCount.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestUniqueTermCount.java b/lucene/core/src/test/org/apache/lucene/index/TestUniqueTermCount.java
index ee47fff..a0fca4c 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestUniqueTermCount.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestUniqueTermCount.java
@@ -98,7 +98,7 @@ public class TestUniqueTermCount extends LuceneTestCase {
   /**
    * Simple similarity that encodes maxTermFrequency directly
    */
-  class TestSimilarity extends Similarity {
+  static class TestSimilarity extends Similarity {
 
     @Override
     public long computeNorm(FieldInvertState state) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/core/src/test/org/apache/lucene/search/TestCustomSearcherSort.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestCustomSearcherSort.java b/lucene/core/src/test/org/apache/lucene/search/TestCustomSearcherSort.java
index a9bac60..f09dfde 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestCustomSearcherSort.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestCustomSearcherSort.java
@@ -183,7 +183,7 @@ public class TestCustomSearcherSort extends LuceneTestCase {
     if (VERBOSE) System.out.println(message);
   }
   
-  public class CustomSearcher extends IndexSearcher {
+  public static class CustomSearcher extends IndexSearcher {
     private int switcher;
     
     public CustomSearcher(IndexReader r, int switcher) {
@@ -212,7 +212,7 @@ public class TestCustomSearcherSort extends LuceneTestCase {
     }
   }
   
-  private class RandomGen {
+  private static class RandomGen {
     RandomGen(Random random) {
       this.random = random;
       base.set(1980, 1, 1);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/core/src/test/org/apache/lucene/search/TestEarlyTerminatingSortingCollector.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestEarlyTerminatingSortingCollector.java b/lucene/core/src/test/org/apache/lucene/search/TestEarlyTerminatingSortingCollector.java
index d12caf1..6d699e8 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestEarlyTerminatingSortingCollector.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestEarlyTerminatingSortingCollector.java
@@ -201,7 +201,7 @@ public class TestEarlyTerminatingSortingCollector extends LuceneTestCase {
     }
   }
 
-  private class TestTerminatedEarlySimpleCollector extends SimpleCollector {
+  private static class TestTerminatedEarlySimpleCollector extends SimpleCollector {
     private boolean collectedSomething;
     public boolean collectedSomething() {
       return collectedSomething;
@@ -216,7 +216,7 @@ public class TestEarlyTerminatingSortingCollector extends LuceneTestCase {
     }
   }
 
-  private class TestEarlyTerminatingSortingcollectorQueryTimeout implements QueryTimeout {
+  private static class TestEarlyTerminatingSortingcollectorQueryTimeout implements QueryTimeout {
     final private boolean shouldExit;
     public TestEarlyTerminatingSortingcollectorQueryTimeout(boolean shouldExit) {
       this.shouldExit = shouldExit;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/core/src/test/org/apache/lucene/search/TestPrefixRandom.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPrefixRandom.java b/lucene/core/src/test/org/apache/lucene/search/TestPrefixRandom.java
index 72fdc7a..f332a36 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestPrefixRandom.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestPrefixRandom.java
@@ -75,7 +75,7 @@ public class TestPrefixRandom extends LuceneTestCase {
   }
   
   /** a stupid prefix query that just blasts thru the terms */
-  private class DumbPrefixQuery extends MultiTermQuery {
+  private static class DumbPrefixQuery extends MultiTermQuery {
     private final BytesRef prefix;
     
     DumbPrefixQuery(Term term) {
@@ -88,7 +88,7 @@ public class TestPrefixRandom extends LuceneTestCase {
       return new SimplePrefixTermsEnum(terms.iterator(), prefix);
     }
 
-    private class SimplePrefixTermsEnum extends FilteredTermsEnum {
+    private static class SimplePrefixTermsEnum extends FilteredTermsEnum {
       private final BytesRef prefix;
 
       private SimplePrefixTermsEnum(TermsEnum tenum, BytesRef prefix) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/core/src/test/org/apache/lucene/search/TestRegexpRandom2.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestRegexpRandom2.java b/lucene/core/src/test/org/apache/lucene/search/TestRegexpRandom2.java
index a704d54..78a4352 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestRegexpRandom2.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestRegexpRandom2.java
@@ -102,7 +102,7 @@ public class TestRegexpRandom2 extends LuceneTestCase {
   }
   
   /** a stupid regexp query that just blasts thru the terms */
-  private class DumbRegexpQuery extends MultiTermQuery {
+  private static class DumbRegexpQuery extends MultiTermQuery {
     private final Automaton automaton;
     
     DumbRegexpQuery(Term term, int flags) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/core/src/test/org/apache/lucene/search/TestSimilarityProvider.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSimilarityProvider.java b/lucene/core/src/test/org/apache/lucene/search/TestSimilarityProvider.java
index de96755..fbc0b35 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSimilarityProvider.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSimilarityProvider.java
@@ -90,7 +90,7 @@ public class TestSimilarityProvider extends LuceneTestCase {
     assertTrue(foodocs.scoreDocs[0].score < bardocs.scoreDocs[0].score);
   }
   
-  private class ExampleSimilarityProvider extends PerFieldSimilarityWrapper {
+  private static class ExampleSimilarityProvider extends PerFieldSimilarityWrapper {
     private Similarity sim1 = new Sim1();
     private Similarity sim2 = new Sim2();
     
@@ -104,7 +104,7 @@ public class TestSimilarityProvider extends LuceneTestCase {
     }
   }
   
-  private class Sim1 extends TFIDFSimilarity {
+  private static class Sim1 extends TFIDFSimilarity {
     
     @Override
     public long encodeNormValue(float f) {
@@ -142,7 +142,7 @@ public class TestSimilarityProvider extends LuceneTestCase {
     }
   }
   
-  private class Sim2 extends TFIDFSimilarity {
+  private static class Sim2 extends TFIDFSimilarity {
     
     @Override
     public long encodeNormValue(float f) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java b/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java
index d00e520..f0ad9b9 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java
@@ -153,7 +153,7 @@ public class TestTermScorer extends LuceneTestCase {
     assertTrue("doc should be number 5", ts.docID() == 5);
   }
   
-  private class TestHit {
+  private static class TestHit {
     public int doc;
     public float score;
     

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/core/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java b/lucene/core/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
index d26006c..6a02e58 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
@@ -320,7 +320,7 @@ public class TestTimeLimitingCollector extends LuceneTestCase {
   }
   
   // counting collector that can slow down at collect().
-  private class MyHitCollector extends SimpleCollector {
+  private static class MyHitCollector extends SimpleCollector {
     private final BitSet bits = new BitSet();
     private int slowdown = 0;
     private int lastDocCollected = -1;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/core/src/test/org/apache/lucene/store/TestLockFactory.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/store/TestLockFactory.java b/lucene/core/src/test/org/apache/lucene/store/TestLockFactory.java
index fa7a3fb..c257861 100644
--- a/lucene/core/src/test/org/apache/lucene/store/TestLockFactory.java
+++ b/lucene/core/src/test/org/apache/lucene/store/TestLockFactory.java
@@ -76,7 +76,7 @@ public class TestLockFactory extends LuceneTestCase {
         }
     }
 
-    class MockLockFactory extends LockFactory {
+    static class MockLockFactory extends LockFactory {
 
         public Map<String,Lock> locksCreated = Collections.synchronizedMap(new HashMap<String,Lock>());
 
@@ -87,7 +87,7 @@ public class TestLockFactory extends LuceneTestCase {
             return lock;
         }
 
-        public class MockLock extends Lock {
+        public static class MockLock extends Lock {
 
             @Override
             public void close() {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/core/src/test/org/apache/lucene/util/TestCloseableThreadLocal.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/util/TestCloseableThreadLocal.java b/lucene/core/src/test/org/apache/lucene/util/TestCloseableThreadLocal.java
index 9f56d7b..ca0f811 100644
--- a/lucene/core/src/test/org/apache/lucene/util/TestCloseableThreadLocal.java
+++ b/lucene/core/src/test/org/apache/lucene/util/TestCloseableThreadLocal.java
@@ -40,7 +40,7 @@ public class TestCloseableThreadLocal extends LuceneTestCase {
     assertNull(ctl.get());
   }
 
-  public class InitValueThreadLocal extends CloseableThreadLocal<Object> {
+  public static class InitValueThreadLocal extends CloseableThreadLocal<Object> {
     @Override
     protected Object initialValue() {
       return TEST_VALUE;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/core/src/test/org/apache/lucene/util/TestQueryBuilder.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/util/TestQueryBuilder.java b/lucene/core/src/test/org/apache/lucene/util/TestQueryBuilder.java
index fc04c5e..a9d803b 100644
--- a/lucene/core/src/test/org/apache/lucene/util/TestQueryBuilder.java
+++ b/lucene/core/src/test/org/apache/lucene/util/TestQueryBuilder.java
@@ -266,7 +266,7 @@ public class TestQueryBuilder extends LuceneTestCase {
     }
   }
   
-  private class SimpleCJKAnalyzer extends Analyzer {
+  private static class SimpleCJKAnalyzer extends Analyzer {
     @Override
     public TokenStreamComponents createComponents(String fieldName) {
       return new TokenStreamComponents(new SimpleCJKTokenizer());

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java b/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java
index 7a8e5f5..6f704c9 100644
--- a/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java
+++ b/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java
@@ -1458,7 +1458,7 @@ public class TestFSTs extends LuceneTestCase {
   }
 
   // used by slowcompletor
-  class TwoLongs {
+  static class TwoLongs {
     long a;
     long b;
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/writercache/CollisionMap.java
----------------------------------------------------------------------
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/writercache/CollisionMap.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/writercache/CollisionMap.java
index 205a540..b80f2bd 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/writercache/CollisionMap.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/writercache/CollisionMap.java
@@ -185,7 +185,7 @@ public class CollisionMap {
     return memoryUsage;
   }
 
-  private class EntryIterator implements Iterator<Entry> {
+  private static class EntryIterator implements Iterator<Entry> {
     Entry next;    // next entry to return
     int index;        // current slot 
     Entry[] ents;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupFacetCollectorTest.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupFacetCollectorTest.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupFacetCollectorTest.java
index 46141bc..c590502 100644
--- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupFacetCollectorTest.java
+++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/GroupFacetCollectorTest.java
@@ -720,7 +720,7 @@ public class GroupFacetCollectorTest extends AbstractGroupingTestCase {
     return null;
   }
 
-  private class IndexContext {
+  private static class IndexContext {
 
     final int numDocs;
     final DirectoryReader indexReader;
@@ -744,7 +744,7 @@ public class GroupFacetCollectorTest extends AbstractGroupingTestCase {
     }
   }
 
-  private class GroupedFacetResult {
+  private static class GroupedFacetResult {
 
     final int totalCount;
     final int totalMissingCount;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/visibility/TestUnifiedHighlighterExtensibility.java
----------------------------------------------------------------------
diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/visibility/TestUnifiedHighlighterExtensibility.java b/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/visibility/TestUnifiedHighlighterExtensibility.java
index 08055a2..e5d7e82 100644
--- a/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/visibility/TestUnifiedHighlighterExtensibility.java
+++ b/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/visibility/TestUnifiedHighlighterExtensibility.java
@@ -202,7 +202,7 @@ public class TestUnifiedHighlighterExtensibility extends LuceneTestCase {
   }
 
   /** Tests maintaining extensibility/visibility of {@link org.apache.lucene.search.uhighlight.FieldHighlighter} out of package. */
-  private class CustomFieldHighlighter extends FieldHighlighter {
+  private static class CustomFieldHighlighter extends FieldHighlighter {
     CustomFieldHighlighter(String field, FieldOffsetStrategy fieldOffsetStrategy, BreakIterator breakIterator, PassageScorer passageScorer, int maxPassages, int maxNoHighlightPassages, PassageFormatter passageFormatter) {
       super(field, fieldOffsetStrategy, breakIterator, passageScorer, maxPassages, maxNoHighlightPassages, passageFormatter);
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/queries/src/java/org/apache/lucene/queries/CustomScoreQuery.java
----------------------------------------------------------------------
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/CustomScoreQuery.java b/lucene/queries/src/java/org/apache/lucene/queries/CustomScoreQuery.java
index 2ae6c01..632bcc7 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/CustomScoreQuery.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/CustomScoreQuery.java
@@ -238,7 +238,7 @@ public class CustomScoreQuery extends Query implements Cloneable {
   /**
    * A scorer that applies a (callback) function on scores of the subQuery.
    */
-  private class CustomScorer extends FilterScorer {
+  private static class CustomScorer extends FilterScorer {
     private final float qWeight;
     private final Scorer subQueryScorer;
     private final Scorer[] valSrcScorers;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java
----------------------------------------------------------------------
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java b/lucene/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java
index 540ecf8..d001cc8 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java
@@ -153,7 +153,7 @@ public class TestCustomScoreQuery extends FunctionTestSetup {
     }
   }
 
-  private final class CustomExternalQuery extends CustomScoreQuery {
+  private static final class CustomExternalQuery extends CustomScoreQuery {
 
     @Override
     protected CustomScoreProvider getCustomScoreProvider(LeafReaderContext context) throws IOException {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/queries/src/test/org/apache/lucene/queries/payloads/TestPayloadSpans.java
----------------------------------------------------------------------
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/payloads/TestPayloadSpans.java b/lucene/queries/src/test/org/apache/lucene/queries/payloads/TestPayloadSpans.java
index 3c5b80f..c39421b 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/payloads/TestPayloadSpans.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/payloads/TestPayloadSpans.java
@@ -461,7 +461,7 @@ public class TestPayloadSpans extends LuceneTestCase {
     assertEquals("expected numSpans", numSpans, cnt);
   }
 
-  final class PayloadAnalyzer extends Analyzer {
+  static final class PayloadAnalyzer extends Analyzer {
 
     @Override
     public TokenStreamComponents createComponents(String fieldName) {
@@ -470,7 +470,7 @@ public class TestPayloadSpans extends LuceneTestCase {
     }
   }
 
-  final class PayloadFilter extends TokenFilter {
+  static final class PayloadFilter extends TokenFilter {
     Set<String> entities = new HashSet<>();
     Set<String> nopayload = new HashSet<>();
     int pos;
@@ -515,7 +515,7 @@ public class TestPayloadSpans extends LuceneTestCase {
     }
   }
   
-  public final class TestPayloadAnalyzer extends Analyzer {
+  public static final class TestPayloadAnalyzer extends Analyzer {
 
     @Override
     public TokenStreamComponents createComponents(String fieldName) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiAnalyzer.java
----------------------------------------------------------------------
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiAnalyzer.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiAnalyzer.java
index bdb6256..85a5f39 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiAnalyzer.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiAnalyzer.java
@@ -120,7 +120,7 @@ public class TestMultiAnalyzer extends BaseTokenStreamTestCase {
    * Expands "multi" to "multi" and "multi2", both at the same position,
    * and expands "triplemulti" to "triplemulti", "multi3", and "multi2".
    */
-  private class MultiAnalyzer extends Analyzer {
+  private static class MultiAnalyzer extends Analyzer {
 
     @Override
     public TokenStreamComponents createComponents(String fieldName) {
@@ -129,7 +129,7 @@ public class TestMultiAnalyzer extends BaseTokenStreamTestCase {
     }
   }
 
-  private final class TestFilter extends TokenFilter {
+  private static final class TestFilter extends TokenFilter {
 
     private String prevType;
     private int prevStartOffset;
@@ -191,7 +191,7 @@ public class TestMultiAnalyzer extends BaseTokenStreamTestCase {
    * Analyzes "the quick brown" as: quick(incr=2) brown(incr=1).
    * Does not work correctly for input other than "the quick brown ...".
    */
-  private class PosIncrementAnalyzer extends Analyzer {
+  private static class PosIncrementAnalyzer extends Analyzer {
 
     @Override
     public TokenStreamComponents createComponents(String fieldName) {
@@ -200,7 +200,7 @@ public class TestMultiAnalyzer extends BaseTokenStreamTestCase {
     }
   }
 
-  private final class TestPosIncrementFilter extends TokenFilter {
+  private static final class TestPosIncrementFilter extends TokenFilter {
 
     CharTermAttribute termAtt;
     PositionIncrementAttribute posIncrAtt;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java
----------------------------------------------------------------------
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java
index 7490e8c..4c28e8f 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java
@@ -336,7 +336,7 @@ public class TestMultiFieldQueryParser extends LuceneTestCase {
   }
 
   /** whitespace+lowercase analyzer with synonyms (dogs,dog) and (guinea pig,cavy) */
-  private class MockSynonymAnalyzer extends Analyzer {
+  private static class MockSynonymAnalyzer extends Analyzer {
     @Override
     public TokenStreamComponents createComponents(String fieldName) {
       Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, true);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiAnalyzerQPHelper.java
----------------------------------------------------------------------
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiAnalyzerQPHelper.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiAnalyzerQPHelper.java
index 56ef65a..220ce02 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiAnalyzerQPHelper.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiAnalyzerQPHelper.java
@@ -139,7 +139,7 @@ public class TestMultiAnalyzerQPHelper extends LuceneTestCase {
    * Expands "multi" to "multi" and "multi2", both at the same position, and
    * expands "triplemulti" to "triplemulti", "multi3", and "multi2".
    */
-  private class MultiAnalyzer extends Analyzer {
+  private static class MultiAnalyzer extends Analyzer {
 
     @Override
     public TokenStreamComponents createComponents(String fieldName) {
@@ -148,7 +148,7 @@ public class TestMultiAnalyzerQPHelper extends LuceneTestCase {
     }
   }
 
-  private final class TestFilter extends TokenFilter {
+  private static final class TestFilter extends TokenFilter {
 
     private String prevType;
     private int prevStartOffset;
@@ -206,7 +206,7 @@ public class TestMultiAnalyzerQPHelper extends LuceneTestCase {
    * Analyzes "the quick brown" as: quick(incr=2) brown(incr=1). Does not work
    * correctly for input other than "the quick brown ...".
    */
-  private class PosIncrementAnalyzer extends Analyzer {
+  private static class PosIncrementAnalyzer extends Analyzer {
 
     @Override
     public TokenStreamComponents createComponents(String fieldName) {
@@ -215,7 +215,7 @@ public class TestMultiAnalyzerQPHelper extends LuceneTestCase {
     }
   }
 
-  private class TestPosIncrementFilter extends TokenFilter {
+  private static class TestPosIncrementFilter extends TokenFilter {
 
     private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
     private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java
----------------------------------------------------------------------
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java
index 2d5ee43..2aa96be 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java
@@ -350,7 +350,7 @@ public class TestQPHelper extends LuceneTestCase {
   }
 
   //individual CJK chars as terms, like StandardAnalyzer
-  private class SimpleCJKTokenizer extends Tokenizer {
+  private static class SimpleCJKTokenizer extends Tokenizer {
     private CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
 
     public SimpleCJKTokenizer() {
@@ -368,7 +368,7 @@ public class TestQPHelper extends LuceneTestCase {
     }
   }
 
-  private class SimpleCJKAnalyzer extends Analyzer {
+  private static class SimpleCJKAnalyzer extends Analyzer {
     @Override
     public TokenStreamComponents createComponents(String fieldName) {
       return new TokenStreamComponents(new SimpleCJKTokenizer());
@@ -1252,7 +1252,7 @@ public class TestQPHelper extends LuceneTestCase {
     super.tearDown();
   }
 
-  private class CannedTokenizer extends Tokenizer {
+  private static class CannedTokenizer extends Tokenizer {
     private int upto = 0;
     private final PositionIncrementAttribute posIncr = addAttribute(PositionIncrementAttribute.class);
     private final CharTermAttribute term = addAttribute(CharTermAttribute.class);
@@ -1291,7 +1291,7 @@ public class TestQPHelper extends LuceneTestCase {
     }
   }
 
-  private class CannedAnalyzer extends Analyzer {
+  private static class CannedAnalyzer extends Analyzer {
     @Override
     public TokenStreamComponents createComponents(String ignored) {
       return new TokenStreamComponents(new CannedTokenizer());

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java
----------------------------------------------------------------------
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java
index 0041106..1b8ee96 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java
@@ -264,7 +264,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase {
     }
   }
 
-  private class SimpleCJKAnalyzer extends Analyzer {
+  private static class SimpleCJKAnalyzer extends Analyzer {
     @Override
     public TokenStreamComponents createComponents(String fieldName) {
       return new TokenStreamComponents(new SimpleCJKTokenizer());
@@ -1095,7 +1095,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase {
   }
 
   /** whitespace+lowercase analyzer with synonyms */
-  protected class Analyzer1 extends Analyzer {
+  protected static class Analyzer1 extends Analyzer {
     public Analyzer1(){
       super();
     }
@@ -1107,7 +1107,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase {
   }
   
   /** whitespace+lowercase analyzer without synonyms */
-  protected class Analyzer2 extends Analyzer {
+  protected static class Analyzer2 extends Analyzer {
     public Analyzer2(){
       super();
     }
@@ -1122,7 +1122,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase {
   /**
    * Mock collation analyzer: indexes terms as "collated" + term
    */
-  private class MockCollationFilter extends TokenFilter {
+  private static class MockCollationFilter extends TokenFilter {
     private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
 
     protected MockCollationFilter(TokenStream input) {
@@ -1141,7 +1141,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase {
     }
     
   }
-  private class MockCollationAnalyzer extends Analyzer {
+  private static class MockCollationAnalyzer extends Analyzer {
     @Override
     public TokenStreamComponents createComponents(String fieldName) {
       Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, true);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java
----------------------------------------------------------------------
diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java
index 8bd7b89..840ade3 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java
@@ -112,7 +112,7 @@ public class FuzzyLikeThisQuery extends Query
     this.maxNumTerms = maxNumTerms;
   }
 
-  class FieldVals
+  static class FieldVals
   {
     String queryString;
     String fieldName;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/sandbox/src/test/org/apache/lucene/payloads/TestPayloadSpanUtil.java
----------------------------------------------------------------------
diff --git a/lucene/sandbox/src/test/org/apache/lucene/payloads/TestPayloadSpanUtil.java b/lucene/sandbox/src/test/org/apache/lucene/payloads/TestPayloadSpanUtil.java
index ecf2ff6..af19e3d 100644
--- a/lucene/sandbox/src/test/org/apache/lucene/payloads/TestPayloadSpanUtil.java
+++ b/lucene/sandbox/src/test/org/apache/lucene/payloads/TestPayloadSpanUtil.java
@@ -72,7 +72,7 @@ public class TestPayloadSpanUtil extends LuceneTestCase {
     directory.close();
   }
 
-  final class PayloadAnalyzer extends Analyzer {
+  final static class PayloadAnalyzer extends Analyzer {
 
     @Override
     public TokenStreamComponents createComponents(String fieldName) {
@@ -81,7 +81,7 @@ public class TestPayloadSpanUtil extends LuceneTestCase {
     }
   }
 
-  final class PayloadFilter extends TokenFilter {
+  static final class PayloadFilter extends TokenFilter {
     Set<String> entities = new HashSet<>();
     Set<String> nopayload = new HashSet<>();
     int pos;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/suggest/src/java/org/apache/lucene/search/spell/WordBreakSpellChecker.java
----------------------------------------------------------------------
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/WordBreakSpellChecker.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/WordBreakSpellChecker.java
index f3cc5bd..c075ad4 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/spell/WordBreakSpellChecker.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/WordBreakSpellChecker.java
@@ -441,7 +441,7 @@ public class WordBreakSpellChecker {
     this.maxEvaluations = maxEvaluations;
   }
   
-  private class LengthThenMaxFreqComparator implements
+  private static class LengthThenMaxFreqComparator implements
       Comparator<SuggestWordArrayWrapper> {
     @Override
     public int compare(SuggestWordArrayWrapper o1, SuggestWordArrayWrapper o2) {
@@ -455,7 +455,7 @@ public class WordBreakSpellChecker {
     }
   }
   
-  private class LengthThenSumFreqComparator implements
+  private static class LengthThenSumFreqComparator implements
       Comparator<SuggestWordArrayWrapper> {
     @Override
     public int compare(SuggestWordArrayWrapper o1, SuggestWordArrayWrapper o2) {
@@ -469,7 +469,7 @@ public class WordBreakSpellChecker {
     }
   }
   
-  private class CombinationsThenFreqComparator implements
+  private static class CombinationsThenFreqComparator implements
       Comparator<CombineSuggestionWrapper> {
     @Override
     public int compare(CombineSuggestionWrapper o1, CombineSuggestionWrapper o2) {
@@ -484,7 +484,7 @@ public class WordBreakSpellChecker {
     }
   }
   
-  private class SuggestWordArrayWrapper {
+  private static class SuggestWordArrayWrapper {
     final SuggestWord[] suggestWords;
     final int freqMax;
     final int freqSum;
@@ -502,7 +502,7 @@ public class WordBreakSpellChecker {
     }
   }
   
-  private class CombineSuggestionWrapper {
+  private static class CombineSuggestionWrapper {
     final CombineSuggestion combineSuggestion;
     final int numCombinations;
     

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/ContextQuery.java
----------------------------------------------------------------------
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/ContextQuery.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/ContextQuery.java
index 7a5e3e0..4a29f24 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/ContextQuery.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/ContextQuery.java
@@ -238,7 +238,7 @@ public class ContextQuery extends CompletionQuery {
     }
   }
 
-  private class ContextCompletionWeight extends CompletionWeight {
+  private static class ContextCompletionWeight extends CompletionWeight {
 
     private final Map<IntsRef, Float> contextMap;
     private final int[] contextLengths;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/ExternalRefSorter.java
----------------------------------------------------------------------
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/ExternalRefSorter.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/ExternalRefSorter.java
index fb876d2..5b7e714 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/ExternalRefSorter.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/ExternalRefSorter.java
@@ -105,7 +105,7 @@ public class ExternalRefSorter implements BytesRefSorter, Closeable {
    * Iterate over byte refs in a file.
    */
   // TODO: this class is a bit silly ... sole purpose is to "remove" Closeable from what #iterator returns:
-  class ByteSequenceIterator implements BytesRefIterator {
+  static class ByteSequenceIterator implements BytesRefIterator {
     private final OfflineSorter.ByteSequencesReader reader;
     private BytesRef scratch;
     

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/WFSTCompletionLookup.java
----------------------------------------------------------------------
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/WFSTCompletionLookup.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/WFSTCompletionLookup.java
index 1cc89a4..d6c1a97 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/WFSTCompletionLookup.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/WFSTCompletionLookup.java
@@ -267,7 +267,7 @@ public class WFSTCompletionLookup extends Lookup implements Accountable {
     return Integer.MAX_VALUE - (int)value;
   }
   
-  private final class WFSTInputIterator extends SortedInputIterator {
+  private static final class WFSTInputIterator extends SortedInputIterator {
 
     WFSTInputIterator(Directory tempDir, String tempFileNamePrefix, InputIterator source) throws IOException {
       super(tempDir, tempFileNamePrefix, source);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellLookup.java
----------------------------------------------------------------------
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellLookup.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellLookup.java
index 3e78f59..948ebeb 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellLookup.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellLookup.java
@@ -147,17 +147,17 @@ public class JaspellLookup extends Lookup implements Accountable {
       node.data = Long.valueOf(in.readLong());
     }
     if ((mask & LO_KID) != 0) {
-      TSTNode kid = trie.new TSTNode('\0', node);
+      TSTNode kid = new TSTNode('\0', node);
       node.relatives[TSTNode.LOKID] = kid;
       readRecursively(in, kid);
     }
     if ((mask & EQ_KID) != 0) {
-      TSTNode kid = trie.new TSTNode('\0', node);
+      TSTNode kid = new TSTNode('\0', node);
       node.relatives[TSTNode.EQKID] = kid;
       readRecursively(in, kid);
     }
     if ((mask & HI_KID) != 0) {
-      TSTNode kid = trie.new TSTNode('\0', node);
+      TSTNode kid = new TSTNode('\0', node);
       node.relatives[TSTNode.HIKID] = kid;
       readRecursively(in, kid);
     }
@@ -196,7 +196,7 @@ public class JaspellLookup extends Lookup implements Accountable {
   @Override
   public boolean load(DataInput input) throws IOException {
     count = input.readVLong();
-    TSTNode root = trie.new TSTNode('\0', null);
+    TSTNode root = new TSTNode('\0', null);
     readRecursively(input, root);
     trie.setRoot(root);
     return true;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellTernarySearchTrie.java
----------------------------------------------------------------------
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellTernarySearchTrie.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellTernarySearchTrie.java
index 15deaea..2a4a439 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellTernarySearchTrie.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellTernarySearchTrie.java
@@ -71,7 +71,7 @@ public class JaspellTernarySearchTrie implements Accountable {
   /**
    * An inner class of Ternary Search Trie that represents a node in the trie.
    */
-  protected final class TSTNode implements Accountable {
+  protected static final class TSTNode implements Accountable {
 
     /** Index values for accessing relatives array. */
     protected final static int PARENT = 0, LOKID = 1, EQKID = 2, HIKID = 3;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentDictionaryTest.java
----------------------------------------------------------------------
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentDictionaryTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentDictionaryTest.java
index a7e9b00..57173e2 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentDictionaryTest.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentDictionaryTest.java
@@ -411,7 +411,7 @@ public class DocumentDictionaryTest extends LuceneTestCase {
     return suggestionList;
   }
 
-  private class Suggestion {
+  private static class Suggestion {
     private long weight;
     private BytesRef payload;
     private Set<BytesRef> contexts;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggesterTest.java
----------------------------------------------------------------------
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggesterTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggesterTest.java
index 478358b..423e62a 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggesterTest.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggesterTest.java
@@ -910,7 +910,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase {
     a.close();
   }
 
-  private class IndexDocument implements Runnable {
+  private static class IndexDocument implements Runnable {
     AnalyzingInfixSuggester suggester;
     String key;
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/FuzzySuggesterTest.java
----------------------------------------------------------------------
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/FuzzySuggesterTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/FuzzySuggesterTest.java
index adda0fc..165c9f2 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/FuzzySuggesterTest.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/FuzzySuggesterTest.java
@@ -1118,7 +1118,7 @@ public class FuzzySuggesterTest extends LuceneTestCase {
 
   private static final Comparator<CharSequence> CHARSEQUENCE_COMPARATOR = new CharSequenceComparator();
 
-  public class CompareByCostThenAlpha implements Comparator<LookupResult> {
+  public static class CompareByCostThenAlpha implements Comparator<LookupResult> {
     @Override
     public int compare(LookupResult a, LookupResult b) {
       if (a.value > b.value) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/test-framework/src/java/org/apache/lucene/store/BaseLockFactoryTestCase.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/store/BaseLockFactoryTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/store/BaseLockFactoryTestCase.java
index 01791b7..312b644 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/store/BaseLockFactoryTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/store/BaseLockFactoryTestCase.java
@@ -274,7 +274,7 @@ public abstract class BaseLockFactoryTestCase extends LuceneTestCase {
     }
   }
 
-  private class SearcherThread extends Thread { 
+  private static class SearcherThread extends Thread {
     private Directory dir;
     private int numIteration;
     public boolean hitException = false;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/tools/src/java/org/apache/lucene/dependencies/GetMavenDependenciesTask.java
----------------------------------------------------------------------
diff --git a/lucene/tools/src/java/org/apache/lucene/dependencies/GetMavenDependenciesTask.java b/lucene/tools/src/java/org/apache/lucene/dependencies/GetMavenDependenciesTask.java
index 5b2f0b8..2c20abd 100644
--- a/lucene/tools/src/java/org/apache/lucene/dependencies/GetMavenDependenciesTask.java
+++ b/lucene/tools/src/java/org/apache/lucene/dependencies/GetMavenDependenciesTask.java
@@ -786,7 +786,7 @@ public class GetMavenDependenciesTask extends Task {
   /**
    * Stores information about an external dependency
    */
-  private class ExternalDependency implements Comparable<ExternalDependency> {
+  private static class ExternalDependency implements Comparable<ExternalDependency> {
     String groupId;
     String artifactId;
     boolean isTestDependency;


[50/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-9959 Fix DIH IntelliJ setup.

Posted by ab...@apache.org.
SOLR-9959 Fix DIH IntelliJ setup.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/7ca861f2
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/7ca861f2
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/7ca861f2

Branch: refs/heads/jira/solr-9959
Commit: 7ca861f2e75e7b6ecbeb86ce97182a6d432b8acb
Parents: 98a595b
Author: Andrzej Bialecki <ab...@apache.org>
Authored: Mon Apr 3 18:27:50 2017 +0200
Committer: Andrzej Bialecki <ab...@apache.org>
Committed: Mon Apr 3 18:27:50 2017 +0200

----------------------------------------------------------------------
 .../idea/solr/contrib/dataimporthandler/dataimporthandler.iml      | 2 +-
 solr/contrib/dataimporthandler/ivy.xml                             | 1 -
 2 files changed, 1 insertion(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7ca861f2/dev-tools/idea/solr/contrib/dataimporthandler/dataimporthandler.iml
----------------------------------------------------------------------
diff --git a/dev-tools/idea/solr/contrib/dataimporthandler/dataimporthandler.iml b/dev-tools/idea/solr/contrib/dataimporthandler/dataimporthandler.iml
index bf3a12e..8240ff2 100644
--- a/dev-tools/idea/solr/contrib/dataimporthandler/dataimporthandler.iml
+++ b/dev-tools/idea/solr/contrib/dataimporthandler/dataimporthandler.iml
@@ -16,7 +16,7 @@
     <orderEntry type="library" scope="TEST" name="HSQLDB" level="project" />
     <orderEntry type="library" scope="TEST" name="Derby" level="project" />
     <orderEntry type="library" scope="TEST" name="Solr DIH test library" level="project" />
-    <orderEntry type="library" scope="TEST" name="Solr example library" level="project" />
+    <orderEntry type="library" name="Solr example library" level="project" />
     <orderEntry type="library" name="Solr core library" level="project" />
     <orderEntry type="library" name="Solrj library" level="project" />
     <orderEntry type="library" name="Solr DIH core library" level="project" />

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/7ca861f2/solr/contrib/dataimporthandler/ivy.xml
----------------------------------------------------------------------
diff --git a/solr/contrib/dataimporthandler/ivy.xml b/solr/contrib/dataimporthandler/ivy.xml
index 5ec92ae..ea138dd 100644
--- a/solr/contrib/dataimporthandler/ivy.xml
+++ b/solr/contrib/dataimporthandler/ivy.xml
@@ -29,7 +29,6 @@
     <dependency org="org.mockito" name="mockito-core" rev="${/org.mockito/mockito-core}" conf="test"/>
     <dependency org="net.bytebuddy" name="byte-buddy" rev="${/net.bytebuddy/byte-buddy}" conf="test"/>
     <dependency org="org.objenesis" name="objenesis" rev="${/org.objenesis/objenesis}" conf="test"/>
-    <dependency org="io.dropwizard.metrics" name="metrics-core" rev="${/io.dropwizard.metrics/metrics-core}" conf="compile" />
     <exclude org="*" ext="*" matcher="regexp" type="${ivy.exclude.types}"/> 
   </dependencies>
 </ivy-module>


[49/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-9959 Fix test errors after merge.

Posted by ab...@apache.org.
SOLR-9959 Fix test errors after merge.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/98a595b3
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/98a595b3
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/98a595b3

Branch: refs/heads/jira/solr-9959
Commit: 98a595b3d2b97bca966a34f3934ff44f16aab551
Parents: 04a7122
Author: Andrzej Bialecki <ab...@apache.org>
Authored: Mon Apr 3 13:56:23 2017 +0200
Committer: Andrzej Bialecki <ab...@apache.org>
Committed: Mon Apr 3 13:56:23 2017 +0200

----------------------------------------------------------------------
 .../solr/metrics/reporters/SolrJmxReporter.java | 24 +++++++++++++++-----
 .../org/apache/solr/util/stats/MetricUtils.java | 11 ++++++++-
 2 files changed, 28 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/98a595b3/solr/core/src/java/org/apache/solr/metrics/reporters/SolrJmxReporter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/metrics/reporters/SolrJmxReporter.java b/solr/core/src/java/org/apache/solr/metrics/reporters/SolrJmxReporter.java
index 3bed6f1..e123e44 100644
--- a/solr/core/src/java/org/apache/solr/metrics/reporters/SolrJmxReporter.java
+++ b/solr/core/src/java/org/apache/solr/metrics/reporters/SolrJmxReporter.java
@@ -25,6 +25,8 @@ import javax.management.ObjectName;
 
 import java.io.IOException;
 import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Locale;
 
 import com.codahale.metrics.Gauge;
@@ -132,6 +134,7 @@ public class SolrJmxReporter extends SolrMetricReporter {
     }
     if (listener != null && registry != null) {
       registry.removeListener(listener);
+      listener.close();
     }
   }
 
@@ -219,6 +222,7 @@ public class SolrJmxReporter extends SolrMetricReporter {
   private static class MetricsMapListener extends MetricRegistryListener.Base {
     MBeanServer server;
     JmxObjectNameFactory nameFactory;
+    List<ObjectName> registered = new ArrayList<>();
 
     MetricsMapListener(MBeanServer server, JmxObjectNameFactory nameFactory) {
       this.server = server;
@@ -234,13 +238,21 @@ public class SolrJmxReporter extends SolrMetricReporter {
       ObjectName objectName = nameFactory.createName("gauges", nameFactory.getDomain(), name);
       try {
         server.registerMBean(gauge, objectName);
-      } catch (InstanceAlreadyExistsException e) {
-        log.warn("##### registration error", e);
-      } catch (MBeanRegistrationException e) {
-        log.warn("##### registration error", e);
-      } catch (NotCompliantMBeanException e) {
-        log.warn("##### registration error", e);
+        registered.add(objectName);
+      } catch (Exception e) {
+        log.warn("bean registration error", e);
       }
     }
+
+    public void close() {
+      for (ObjectName name : registered) {
+        try {
+          server.unregisterMBean(name);
+        } catch (Exception e) {
+          log.warn("bean unregistration error", e);
+        }
+      }
+
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/98a595b3/solr/core/src/java/org/apache/solr/util/stats/MetricUtils.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/util/stats/MetricUtils.java b/solr/core/src/java/org/apache/solr/util/stats/MetricUtils.java
index 173172c..e09cc88 100644
--- a/solr/core/src/java/org/apache/solr/util/stats/MetricUtils.java
+++ b/solr/core/src/java/org/apache/solr/util/stats/MetricUtils.java
@@ -231,7 +231,16 @@ public class MetricUtils {
       consumer.accept(n, convertCounter(counter, compact));
     } else if (metric instanceof Gauge) {
       Gauge gauge = (Gauge) metric;
-      consumer.accept(n, convertGauge(gauge, compact));
+      try {
+        consumer.accept(n, convertGauge(gauge, compact));
+      } catch (InternalError ie) {
+        if (n.startsWith("memory.") && ie.getMessage().contains("Memory Pool not found")) {
+          LOG.warn("Error converting gauge '" + n + "', possible JDK bug: SOLR-10362", ie);
+          consumer.accept(n, null);
+        } else {
+          throw ie;
+        }
+      }
     } else if (metric instanceof Meter) {
       Meter meter = (Meter) metric;
       consumer.accept(n, convertMeter(meter));


[33/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-7452: add more tests for refinement of missing buckets

Posted by ab...@apache.org.
SOLR-7452: add more tests for refinement of missing buckets


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/bdb0d588
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/bdb0d588
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/bdb0d588

Branch: refs/heads/jira/solr-9959
Commit: bdb0d588ee8129cf42df03f6185fbb3f4d8e0af4
Parents: cc62340
Author: yonik <yo...@apache.org>
Authored: Fri Mar 31 12:55:15 2017 -0400
Committer: yonik <yo...@apache.org>
Committed: Fri Mar 31 12:55:15 2017 -0400

----------------------------------------------------------------------
 .../search/facet/TestJsonFacetRefinement.java   | 62 +++++++++++++-------
 1 file changed, 41 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/bdb0d588/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacetRefinement.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacetRefinement.java b/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacetRefinement.java
index 52b8be4..bcb5f09 100644
--- a/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacetRefinement.java
+++ b/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacetRefinement.java
@@ -244,21 +244,22 @@ public class TestJsonFacetRefinement extends SolrTestCaseHS {
 
     client.deleteByQuery("*:*", null);
 
-    ModifiableSolrParams p = params("cat_s", "cat_s", "xy_s", "xy_s", "num_d", "num_d");
+    ModifiableSolrParams p = params("cat_s", "cat_s", "xy_s", "xy_s", "num_d", "num_d", "qw_s", "qw_s");
     String cat_s = p.get("cat_s");
     String xy_s = p.get("xy_s");
+    String qw_s = p.get("qw_s");
     String num_d = p.get("num_d");
 
-    clients.get(0).add( sdoc("id", "01", "all_s","all", cat_s, "A", xy_s, "X" ,num_d, -1) ); // A wins count tie
-    clients.get(0).add( sdoc("id", "02", "all_s","all", cat_s, "B", xy_s, "Y", num_d, 3) );
+    clients.get(0).add( sdoc("id", "01", "all_s","all", cat_s, "A", xy_s, "X" ,num_d, -1,  qw_s, "Q") ); // A wins count tie
+    clients.get(0).add( sdoc("id", "02", "all_s","all", cat_s, "B", xy_s, "Y", num_d, 3             ) );
 
-    clients.get(1).add( sdoc("id", "11", "all_s","all", cat_s, "B", xy_s, "X", num_d, -5) ); // B highest count
-    clients.get(1).add( sdoc("id", "12", "all_s","all", cat_s, "B", xy_s, "Y", num_d, -11) );
-    clients.get(1).add( sdoc("id", "13", "all_s","all", cat_s, "A", xy_s, "X", num_d, 7) );
+    clients.get(1).add( sdoc("id", "11", "all_s","all", cat_s, "B", xy_s, "X", num_d, -5            ) ); // B highest count
+    clients.get(1).add( sdoc("id", "12", "all_s","all", cat_s, "B", xy_s, "Y", num_d, -11, qw_s, "W") );
+    clients.get(1).add( sdoc("id", "13", "all_s","all", cat_s, "A", xy_s, "X", num_d, 7             ) );
 
-    clients.get(2).add( sdoc("id", "21", "all_s","all", cat_s, "A", xy_s, "X", num_d, 17) ); // A highest count
-    clients.get(2).add( sdoc("id", "22", "all_s","all", cat_s, "A", xy_s, "Y", num_d, -19) );
-    clients.get(2).add( sdoc("id", "23", "all_s","all", cat_s, "B", xy_s, "X", num_d, 11) );
+    clients.get(2).add( sdoc("id", "21", "all_s","all", cat_s, "A", xy_s, "X", num_d, 17,  qw_s, "W") ); // A highest count
+    clients.get(2).add( sdoc("id", "22", "all_s","all", cat_s, "A", xy_s, "Y", num_d, -19           ) );
+    clients.get(2).add( sdoc("id", "23", "all_s","all", cat_s, "B", xy_s, "X", num_d, 11            ) );
 
     client.commit();
 
@@ -277,18 +278,6 @@ public class TestJsonFacetRefinement extends SolrTestCaseHS {
     );
     ****/
 
-    // test refining under the special "missing" bucket of a field facet
-    client.testJQ(params(p, "q", "*:*",
-        "json.facet", "{" +
-            "f:{type:terms, field:missing_s, limit:1, overrequest:0, missing:true, refine:true,  facet:{  cat:{type:terms, field:${cat_s}, limit:1, overrequest:0, refine:true   }  }}" +
-            "}"
-        )
-        , "facets=={ count:8" +
-            ", f:{ buckets:[], missing:{count:8, cat:{buckets:[{val:A,count:4}]}  }  }" +  // just like the previous response, just nested under a field facet
-            "}"
-    );
-
-
     client.testJQ(params(p, "q", "*:*",
         "json.facet", "{" +
             "cat0:{type:terms, field:${cat_s}, sort:'count desc', limit:1, overrequest:0, refine:false}" +
@@ -367,6 +356,37 @@ public class TestJsonFacetRefinement extends SolrTestCaseHS {
             "}"
     );
 
+    // test refining under the special "missing" bucket of a field facet
+    client.testJQ(params(p, "q", "*:*",
+        "json.facet", "{" +
+            "f:{type:terms, field:missing_s, limit:1, overrequest:0, missing:true, refine:true,  facet:{  cat:{type:terms, field:${cat_s}, limit:1, overrequest:0, refine:true   }  }}" +
+            "}"
+        )
+        , "facets=={ count:8" +
+            ", f:{ buckets:[], missing:{count:8, cat:{buckets:[{val:A,count:4}]}  }  }" +  // just like the previous response, just nested under a field facet
+            "}"
+    );
+
+    // test filling in "missing" bucket for partially refined facets
+    client.testJQ(params(p, "q", "*:*",
+        "json.facet", "{" +
+            // test all values missing in sub-facet
+            " ab :{type:terms, field:${cat_s}, limit:1, overrequest:0, refine:false,  facet:{  zz:{type:terms, field:missing_s, limit:1, overrequest:0, refine:false, missing:true}  }}" +
+            ",ab2:{type:terms, field:${cat_s}, limit:1, overrequest:0, refine:true ,  facet:{  zz:{type:terms, field:missing_s, limit:1, overrequest:0, refine:true , missing:true}  }}" +
+            // test some values missing in sub-facet (and test that this works with normal partial bucket refinement)
+            ", cd :{type:terms, field:${cat_s}, limit:1, overrequest:0, refine:false,  facet:{  qw:{type:terms, field:${qw_s}, limit:1, overrequest:0, refine:false, missing:true,   facet:{qq:{query:'*:*'}}   }  }}" +
+            ", cd2:{type:terms, field:${cat_s}, limit:1, overrequest:0, refine:true ,  facet:{  qw:{type:terms, field:${qw_s}, limit:1, overrequest:0, refine:true , missing:true,   facet:{qq:{query:'*:*'}}   }  }}" +
+
+            "}"
+        )
+        , "facets=={ count:8" +
+            ", ab:{ buckets:[  {val:A, count:3, zz:{buckets:[], missing:{count:3}}}]  }" +
+            ",ab2:{ buckets:[  {val:A, count:4, zz:{buckets:[], missing:{count:4}}}]  }" +
+            ", cd:{ buckets:[  {val:A, count:3,  qw:{buckets:[{val:Q, count:1, qq:{count:1}}], missing:{count:1,qq:{count:1}}}}]  }" +
+            ",cd2:{ buckets:[  {val:A, count:4,  qw:{buckets:[{val:Q, count:1, qq:{count:1}}], missing:{count:2,qq:{count:2}}}}]  }" +
+            "}"
+    );
+
 
 
   }


[18/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-10349: Add totalTermFreq support to TermsComponent

Posted by ab...@apache.org.
SOLR-10349: Add totalTermFreq support to TermsComponent

TermsComponent only returns docFreq information per requested term.
This commit adds a terms.ttf parameter, which if set to true, will
return both docFreq and totalTermFreq statistics for each requested
term.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/deddc9b5
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/deddc9b5
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/deddc9b5

Branch: refs/heads/jira/solr-9959
Commit: deddc9b5c8d8c2859469583fa8b956be48efff82
Parents: 144091a
Author: Shai Erera <sh...@apache.org>
Authored: Thu Mar 23 08:28:05 2017 +0200
Committer: Shai Erera <sh...@apache.org>
Committed: Wed Mar 29 06:18:39 2017 +0300

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  6 +-
 .../solr/handler/component/TermsComponent.java  | 66 ++++++++++++--------
 .../DistributedTermsComponentTest.java          |  3 +-
 .../handler/component/TermsComponentTest.java   | 38 +++++++++++
 .../client/solrj/response/QueryResponse.java    |  6 +-
 .../client/solrj/response/TermsResponse.java    | 37 +++++++++--
 .../apache/solr/common/params/TermsParams.java  | 12 ++--
 7 files changed, 126 insertions(+), 42 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/deddc9b5/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 9d14e59..4e63926 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -115,17 +115,19 @@ Detailed Change List
 New Features
 ----------------------
 
-* SOLR-9992: Add support for grouping with PointFIelds. (Cao Manh Dat) 
+* SOLR-9992: Add support for grouping with PointFIelds. (Cao Manh Dat)
 
 * SOLR-10046: Add UninvertDocValuesMergePolicyFactory class. (Keith Laban, Christine Poerschke)
 
-* SOLR-9994: Add support for CollapseQParser with PointFields. (Varun Thacker, Cao Manh Dat) 
+* SOLR-9994: Add support for CollapseQParser with PointFields. (Varun Thacker, Cao Manh Dat)
 
 * SOLR-10076: Hide keystore and truststore passwords from /admin/info/* outputs. (Mano Kovacs via Mark Miller)
 
 * SOLR-6736: Adding support for uploading zipped configsets using ConfigSets API (Varun Rajput, Ishan Chattopadhyaya,
   Noble Paul, Anshum Gupta, Gregory Chanan)
 
+* SOLR-10349: Add totalTermFreq support to TermsComponent. (Shai Erera)
+
 Optimizations
 ----------------------
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/deddc9b5/solr/core/src/java/org/apache/solr/handler/component/TermsComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/TermsComponent.java b/solr/core/src/java/org/apache/solr/handler/component/TermsComponent.java
index e00120c..b05939e 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/TermsComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/TermsComponent.java
@@ -108,8 +108,9 @@ public class TermsComponent extends SearchComponent {
     }
 
     String termList = params.get(TermsParams.TERMS_LIST);
-    if(termList != null) {
-      fetchTerms(rb.req.getSearcher(), fields, termList, termsResult);
+    if (termList != null) {
+      boolean includeTotalTermFreq = params.getBool(TermsParams.TERMS_TTF, false);
+      fetchTerms(rb.req.getSearcher(), fields, termList, includeTotalTermFreq, termsResult);
       return;
     }
 
@@ -303,7 +304,7 @@ public class TermsComponent extends SearchComponent {
     if (th != null) {
       for (ShardResponse srsp : sreq.responses) {
         @SuppressWarnings("unchecked")
-        NamedList<NamedList<Number>> terms = (NamedList<NamedList<Number>>) srsp.getSolrResponse().getResponse().get("terms");
+        NamedList<NamedList<Object>> terms = (NamedList<NamedList<Object>>) srsp.getSolrResponse().getResponse().get("terms");
         th.parse(terms);
 
 
@@ -376,7 +377,7 @@ public class TermsComponent extends SearchComponent {
       }
     }
 
-    public void parse(NamedList<NamedList<Number>> terms) {
+    public void parse(NamedList<NamedList<Object>> terms) {
       // exit if there is no terms
       if (terms == null) {
         return;
@@ -400,6 +401,7 @@ public class TermsComponent extends SearchComponent {
           if (termmap.containsKey(term)) {
             TermsResponse.Term oldtc = termmap.get(term);
             oldtc.addFrequency(tc.getFrequency());
+            oldtc.addTotalTermFreq(tc.getTotalTermFreq());
             termmap.put(term, oldtc);
           } else {
             termmap.put(term, tc);
@@ -442,7 +444,7 @@ public class TermsComponent extends SearchComponent {
 
       // loop though each field we want terms from
       for (String key : fieldmap.keySet()) {
-        NamedList<Number> fieldterms = new SimpleOrderedMap<>();
+        NamedList<Object> fieldterms = new SimpleOrderedMap<>();
         TermsResponse.Term[] data = null;
         if (sort) {
           data = getCountSorted(fieldmap.get(key));
@@ -450,11 +452,19 @@ public class TermsComponent extends SearchComponent {
           data = getLexSorted(fieldmap.get(key));
         }
 
+        boolean includeTotalTermFreq = params.getBool(TermsParams.TERMS_TTF, false);
         // loop though each term until we hit limit
         int cnt = 0;
         for (TermsResponse.Term tc : data) {
           if (tc.getFrequency() >= freqmin && tc.getFrequency() <= freqmax) {
-            fieldterms.add(tc.getTerm(), num(tc.getFrequency()));
+            if (includeTotalTermFreq) {
+              NamedList<Number> termStats = new SimpleOrderedMap<>();
+              termStats.add("docFreq", tc.getFrequency());
+              termStats.add("totalTermFreq", tc.getTotalTermFreq());
+              fieldterms.add(tc.getTerm(), termStats);
+            } else {
+              fieldterms.add(tc.getTerm(), num(tc.getFrequency()));
+            }
             cnt++;
           }
 
@@ -508,10 +518,9 @@ public class TermsComponent extends SearchComponent {
   private void fetchTerms(SolrIndexSearcher indexSearcher,
                           String[] fields,
                           String termList,
+                          boolean includeTotalTermFreq,
                           NamedList result) throws IOException {
 
-    NamedList termsMap = new SimpleOrderedMap();
-    List<LeafReaderContext> leaves = indexSearcher.getTopReaderContext().leaves();
     String field = fields[0];
     FieldType fieldType = indexSearcher.getSchema().getField(field).getType();
     String[] splitTerms = termList.split(",");
@@ -521,35 +530,43 @@ public class TermsComponent extends SearchComponent {
     }
 
     Term[] terms = new Term[splitTerms.length];
-    TermContext[] termContexts = new TermContext[terms.length];
     for(int i=0; i<splitTerms.length; i++) {
       terms[i] = new Term(field, fieldType.readableToIndexed(splitTerms[i]));
     }
 
     Arrays.sort(terms);
 
-    collectTermContext(indexSearcher.getTopReaderContext().reader(), leaves, termContexts, terms);
+    IndexReaderContext topReaderContext = indexSearcher.getTopReaderContext();
+    TermContext[] termContexts = new TermContext[terms.length];
+    collectTermContext(topReaderContext, termContexts, terms);
 
-    for(int i=0; i<terms.length; i++) {
-      if(termContexts[i] != null) {
+    NamedList termsMap = new SimpleOrderedMap();
+    for (int i = 0; i < terms.length; i++) {
+      if (termContexts[i] != null) {
         String outTerm = fieldType.indexedToReadable(terms[i].bytes().utf8ToString());
         int docFreq = termContexts[i].docFreq();
-        termsMap.add(outTerm, docFreq);
+        if (!includeTotalTermFreq) {
+          termsMap.add(outTerm, docFreq);
+        } else {
+          long totalTermFreq = termContexts[i].totalTermFreq();
+          NamedList<Long> termStats = new SimpleOrderedMap<>();
+          termStats.add("docFreq", (long) docFreq);
+          termStats.add("totalTermFreq", totalTermFreq);
+          termsMap.add(outTerm, termStats);
+        }
       }
     }
 
     result.add(field, termsMap);
   }
 
-  private void collectTermContext(IndexReader reader,
-                                 List<LeafReaderContext> leaves, TermContext[] contextArray,
-                                 Term[] queryTerms) throws IOException {
+  private void collectTermContext(IndexReaderContext topReaderContext, TermContext[] contextArray, Term[] queryTerms)
+      throws IOException {
     TermsEnum termsEnum = null;
-    for (LeafReaderContext context : leaves) {
+    for (LeafReaderContext context : topReaderContext.leaves()) {
       final Fields fields = context.reader().fields();
       for (int i = 0; i < queryTerms.length; i++) {
         Term term = queryTerms[i];
-        TermContext termContext = contextArray[i];
         final Terms terms = fields.terms(term.field());
         if (terms == null) {
           // field does not exist
@@ -559,18 +576,15 @@ public class TermsComponent extends SearchComponent {
         assert termsEnum != null;
 
         if (termsEnum == TermsEnum.EMPTY) continue;
+
+        TermContext termContext = contextArray[i];
         if (termsEnum.seekExact(term.bytes())) {
           if (termContext == null) {
-            contextArray[i] = new TermContext(reader.getContext(),
-                termsEnum.termState(), context.ord, termsEnum.docFreq(),
-                termsEnum.totalTermFreq());
-          } else {
-            termContext.register(termsEnum.termState(), context.ord,
-                termsEnum.docFreq(), termsEnum.totalTermFreq());
+            termContext = new TermContext(topReaderContext);
+            contextArray[i] = termContext;
           }
-
+          termContext.accumulateStatistics(termsEnum.docFreq(), termsEnum.totalTermFreq());
         }
-
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/deddc9b5/solr/core/src/test/org/apache/solr/handler/component/DistributedTermsComponentTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/handler/component/DistributedTermsComponentTest.java b/solr/core/src/test/org/apache/solr/handler/component/DistributedTermsComponentTest.java
index 951cd88..9c90efb 100644
--- a/solr/core/src/test/org/apache/solr/handler/component/DistributedTermsComponentTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/component/DistributedTermsComponentTest.java
@@ -52,7 +52,6 @@ public class DistributedTermsComponentTest extends BaseDistributedSearchTestCase
     query("qt", "/terms", "shards.qt", "/terms", "terms", "true", "terms.fl", "b_t", "terms.list", "snake, zebra, ant, bad");
     query("qt", "/terms", "shards.qt", "/terms", "terms", "true", "terms.fl", "foo_i", "terms.list", "2, 3, 1");
     query("qt", "/terms", "shards.qt", "/terms", "terms", "true", "terms.fl", "foo_i", "terms.stats", "true","terms.list", "2, 3, 1");
-
-
+    query("qt", "/terms", "shards.qt", "/terms", "terms", "true", "terms.fl", "b_t", "terms.list", "snake, zebra", "terms.ttf", "true");
   }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/deddc9b5/solr/core/src/test/org/apache/solr/handler/component/TermsComponentTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/handler/component/TermsComponentTest.java b/solr/core/src/test/org/apache/solr/handler/component/TermsComponentTest.java
index 177881a..7fb5e12 100644
--- a/solr/core/src/test/org/apache/solr/handler/component/TermsComponentTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/component/TermsComponentTest.java
@@ -18,6 +18,7 @@ package org.apache.solr.handler.component;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.params.TermsParams;
+import org.apache.solr.request.SolrQueryRequest;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -313,4 +314,41 @@ public class TermsComponentTest extends SolrTestCaseJ4 {
        ,"count(//lst[@name='standardfilt']/*)=3"
     );
   }
+
+  @Test
+  public void testDocFreqAndTotalTermFreq() throws Exception {
+    SolrQueryRequest req = req(
+        "indent","true",
+        "qt", "/terms",
+        "terms", "true",
+        "terms.fl", "standardfilt",
+        "terms.ttf", "true",
+        "terms.list", "snake,spider,shark,ddddd");
+    assertQ(req,
+        "count(//lst[@name='standardfilt']/*)=4",
+        "//lst[@name='standardfilt']/lst[@name='ddddd']/long[@name='docFreq'][.='4']",
+        "//lst[@name='standardfilt']/lst[@name='ddddd']/long[@name='totalTermFreq'][.='4']",
+        "//lst[@name='standardfilt']/lst[@name='shark']/long[@name='docFreq'][.='2']",
+        "//lst[@name='standardfilt']/lst[@name='shark']/long[@name='totalTermFreq'][.='2']",
+        "//lst[@name='standardfilt']/lst[@name='snake']/long[@name='docFreq'][.='3']",
+        "//lst[@name='standardfilt']/lst[@name='snake']/long[@name='totalTermFreq'][.='3']",
+        "//lst[@name='standardfilt']/lst[@name='spider']/long[@name='docFreq'][.='1']",
+        "//lst[@name='standardfilt']/lst[@name='spider']/long[@name='totalTermFreq'][.='1']");
+  }
+
+  @Test
+  public void testDocFreqAndTotalTermFreqForNonExistingTerm() throws Exception {
+    SolrQueryRequest req = req(
+        "indent","true",
+        "qt", "/terms",
+        "terms", "true",
+        "terms.fl", "standardfilt",
+        "terms.ttf", "true",
+        "terms.list", "boo,snake");
+    assertQ(req,
+        "count(//lst[@name='standardfilt']/*)=1",
+        "//lst[@name='standardfilt']/lst[@name='snake']/long[@name='docFreq'][.='3']",
+        "//lst[@name='standardfilt']/lst[@name='snake']/long[@name='totalTermFreq'][.='3']");
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/deddc9b5/solr/solrj/src/java/org/apache/solr/client/solrj/response/QueryResponse.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/response/QueryResponse.java b/solr/solrj/src/java/org/apache/solr/client/solrj/response/QueryResponse.java
index eb595aa..4e78005 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/response/QueryResponse.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/response/QueryResponse.java
@@ -50,7 +50,7 @@ public class QueryResponse extends SolrResponseBase
   private List<NamedList<Object>> _clusterInfo = null;
   private Map<String,NamedList<Object>> _suggestInfo = null;
   private NamedList<Object> _statsInfo = null;
-  private NamedList<NamedList<Number>> _termsInfo = null;
+  private NamedList<NamedList<Object>> _termsInfo = null;
   private NamedList<SolrDocumentList> _moreLikeThisInfo = null;
   private String _cursorMarkNext = null;
 
@@ -166,7 +166,7 @@ public class QueryResponse extends SolrResponseBase
         extractStatsInfo( _statsInfo );
       }
       else if ( "terms".equals( n ) ) {
-        _termsInfo = (NamedList<NamedList<Number>>) res.getVal( i );
+        _termsInfo = (NamedList<NamedList<Object>>) res.getVal( i );
         extractTermsInfo( _termsInfo );
       }
       else if ( "moreLikeThis".equals( n ) ) {
@@ -191,7 +191,7 @@ public class QueryResponse extends SolrResponseBase
     _suggestResponse = new SuggesterResponse(suggestInfo);
   }
 
-  private void extractTermsInfo(NamedList<NamedList<Number>> termsInfo) {
+  private void extractTermsInfo(NamedList<NamedList<Object>> termsInfo) {
     _termsResponse = new TermsResponse(termsInfo);
   }
   

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/deddc9b5/solr/solrj/src/java/org/apache/solr/client/solrj/response/TermsResponse.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/response/TermsResponse.java b/solr/solrj/src/java/org/apache/solr/client/solrj/response/TermsResponse.java
index e3fb061..b4ee553 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/response/TermsResponse.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/response/TermsResponse.java
@@ -28,17 +28,26 @@ import java.util.Map;
 public class TermsResponse {
   private Map<String, List<Term>> termMap = new HashMap<>();
   
-  public TermsResponse(NamedList<NamedList<Number>> termsInfo) {
+  public TermsResponse(NamedList<NamedList<Object>> termsInfo) {
     for (int i = 0; i < termsInfo.size(); i++) {
       String fieldName = termsInfo.getName(i);
       List<Term> itemList = new ArrayList<>();
-      NamedList<Number> items = termsInfo.getVal(i);
+      NamedList<Object> items = termsInfo.getVal(i);
       
       for (int j = 0; j < items.size(); j++) {
-        Term t = new Term(items.getName(j), items.getVal(j).longValue());
+        String term = items.getName(j);
+        Object val = items.getVal(j);
+        Term t;
+        if (val instanceof NamedList) {
+          @SuppressWarnings("unchecked")
+          NamedList<Number> termStats = (NamedList<Number>) val;
+          t = new Term(term, termStats.get("docFreq").longValue(), termStats.get("totalTermFreq").longValue());
+        } else {
+          t = new Term(term, ((Number) val).longValue());
+        }
         itemList.add(t);
       }
-      
+
       termMap.put(fieldName, itemList);
     }
   }
@@ -59,10 +68,16 @@ public class TermsResponse {
   public static class Term {
     private String term;
     private long frequency;
+    private long totalTermFreq;
 
     public Term(String term, long frequency) {
+      this(term, frequency, 0);
+    }
+
+    public Term(String term, long frequency, long totalTermFreq) {
       this.term = term;
       this.frequency = frequency;
+      this.totalTermFreq = totalTermFreq;
     }
 
     public String getTerm() {
@@ -80,9 +95,21 @@ public class TermsResponse {
     public void setFrequency(long frequency) {
       this.frequency = frequency;
     }
-    
+
     public void addFrequency(long frequency) {
       this.frequency += frequency;
     }
+
+    public long getTotalTermFreq() {
+      return totalTermFreq;
+    }
+
+    public void setTotalTermFreq(long totalTermFreq) {
+      this.totalTermFreq = totalTermFreq;
+    }
+
+    public void addTotalTermFreq(long totalTermFreq) {
+      this.totalTermFreq += totalTermFreq;
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/deddc9b5/solr/solrj/src/java/org/apache/solr/common/params/TermsParams.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/common/params/TermsParams.java b/solr/solrj/src/java/org/apache/solr/common/params/TermsParams.java
index 4975846..9f96a80 100644
--- a/solr/solrj/src/java/org/apache/solr/common/params/TermsParams.java
+++ b/solr/solrj/src/java/org/apache/solr/common/params/TermsParams.java
@@ -42,17 +42,20 @@ public interface TermsParams {
 
   /**
    * Optional. The list of terms to be retrieved.
-   *
    */
   public static final String TERMS_LIST = TERMS_PREFIX + "list";
 
   /**
-   * Optional. The list of terms to be retrieved.
-   *
+   * Optional. If true, also returns index-level statistics, such as numDocs.
    */
   public static final String TERMS_STATS = TERMS_PREFIX + "stats";
 
   /**
+   * Optional. If true, also returns terms' total term frequency.
+   */
+  public static final String TERMS_TTF = TERMS_PREFIX + "ttf";
+
+  /**
    * Optional.  The lower bound term to start at.  The TermEnum will start at the next term after this term in the dictionary.
    *
    * If not specified, the empty string is used
@@ -107,10 +110,11 @@ public interface TermsParams {
       }
   }
 
-    /**
+  /**
    * Optional.  The minimum value of docFreq to be returned.  1 by default
    */
   public static final String TERMS_MINCOUNT = TERMS_PREFIX + "mincount";
+
   /**
    * Optional.  The maximum value of docFreq to be returned.  -1 by default means no boundary
    */


[40/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-7383: Replace DIH 'rss' example with 'atom' rss example was broken for multiple reasons. atom example showcases the same - and more - features and uses the smallest config file needed to make it work.

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stopwords_es.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_es.txt b/solr/example/example-DIH/solr/rss/conf/lang/stopwords_es.txt
deleted file mode 100644
index 487d78c..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_es.txt
+++ /dev/null
@@ -1,356 +0,0 @@
- | From svn.tartarus.org/snowball/trunk/website/algorithms/spanish/stop.txt
- | This file is distributed under the BSD License.
- | See http://snowball.tartarus.org/license.php
- | Also see http://www.opensource.org/licenses/bsd-license.html
- |  - Encoding was converted to UTF-8.
- |  - This notice was added.
- |
- | NOTE: To use this file with StopFilterFactory, you must specify format="snowball"
-
- | A Spanish stop word list. Comments begin with vertical bar. Each stop
- | word is at the start of a line.
-
-
- | The following is a ranked list (commonest to rarest) of stopwords
- | deriving from a large sample of text.
-
- | Extra words have been added at the end.
-
-de             |  from, of
-la             |  the, her
-que            |  who, that
-el             |  the
-en             |  in
-y              |  and
-a              |  to
-los            |  the, them
-del            |  de + el
-se             |  himself, from him etc
-las            |  the, them
-por            |  for, by, etc
-un             |  a
-para           |  for
-con            |  with
-no             |  no
-una            |  a
-su             |  his, her
-al             |  a + el
-  | es         from SER
-lo             |  him
-como           |  how
-m�s            |  more
-pero           |  pero
-sus            |  su plural
-le             |  to him, her
-ya             |  already
-o              |  or
-  | fue        from SER
-este           |  this
-  | ha         from HABER
-sí             |  himself etc
-porque         |  because
-esta           |  this
-  | son        from SER
-entre          |  between
-  | está     from ESTAR
-cuando         |  when
-muy            |  very
-sin            |  without
-sobre          |  on
-  | ser        from SER
-  | tiene      from TENER
-también        |  also
-me             |  me
-hasta          |  until
-hay            |  there is/are
-donde          |  where
-  | han        from HABER
-quien          |  whom, that
-  | están      from ESTAR
-  | estado     from ESTAR
-desde          |  from
-todo           |  all
-nos            |  us
-durante        |  during
-  | estados    from ESTAR
-todos          |  all
-uno            |  a
-les            |  to them
-ni             |  nor
-contra         |  against
-otros          |  other
-  | fueron     from SER
-ese            |  that
-eso            |  that
-  | había      from HABER
-ante           |  before
-ellos          |  they
-e              |  and (variant of y)
-esto           |  this
-mí             |  me
-antes          |  before
-algunos        |  some
-qué            |  what?
-unos           |  a
-yo             |  I
-otro           |  other
-otras          |  other
-otra           |  other
-él             |  he
-tanto          |  so much, many
-esa            |  that
-estos          |  these
-mucho          |  much, many
-quienes        |  who
-nada           |  nothing
-muchos         |  many
-cual           |  who
-  | sea        from SER
-poco           |  few
-ella           |  she
-estar          |  to be
-  | haber      from HABER
-estas          |  these
-  | estaba     from ESTAR
-  | estamos    from ESTAR
-algunas        |  some
-algo           |  something
-nosotros       |  we
-
-      | other forms
-
-mi             |  me
-mis            |  mi plural
-tú             |  thou
-te             |  thee
-ti             |  thee
-tu             |  thy
-tus            |  tu plural
-ellas          |  they
-nosotras       |  we
-vosotros       |  you
-vosotras       |  you
-os             |  you
-mío            |  mine
-mía            |
-míos           |
-mías           |
-tuyo           |  thine
-tuya           |
-tuyos          |
-tuyas          |
-suyo           |  his, hers, theirs
-suya           |
-suyos          |
-suyas          |
-nuestro        |  ours
-nuestra        |
-nuestros       |
-nuestras       |
-vuestro        |  yours
-vuestra        |
-vuestros       |
-vuestras       |
-esos           |  those
-esas           |  those
-
-               | forms of estar, to be (not including the infinitive):
-estoy
-estás
-está
-estamos
-estáis
-están
-esté
-estés
-estemos
-estéis
-estén
-estaré
-estarás
-estará
-estaremos
-estaréis
-estarán
-estaría
-estarías
-estaríamos
-estaríais
-estarían
-estaba
-estabas
-estábamos
-estabais
-estaban
-estuve
-estuviste
-estuvo
-estuvimos
-estuvisteis
-estuvieron
-estuviera
-estuvieras
-estuviéramos
-estuvierais
-estuvieran
-estuviese
-estuvieses
-estuviésemos
-estuvieseis
-estuviesen
-estando
-estado
-estada
-estados
-estadas
-estad
-
-               | forms of haber, to have (not including the infinitive):
-he
-has
-ha
-hemos
-habéis
-han
-haya
-hayas
-hayamos
-hayáis
-hayan
-habré
-habrás
-habrá
-habremos
-habréis
-habrán
-habría
-habrías
-habríamos
-habríais
-habrían
-había
-habías
-habíamos
-habíais
-habían
-hube
-hubiste
-hubo
-hubimos
-hubisteis
-hubieron
-hubiera
-hubieras
-hubiéramos
-hubierais
-hubieran
-hubiese
-hubieses
-hubiésemos
-hubieseis
-hubiesen
-habiendo
-habido
-habida
-habidos
-habidas
-
-               | forms of ser, to be (not including the infinitive):
-soy
-eres
-es
-somos
-sois
-son
-sea
-seas
-seamos
-seáis
-sean
-seré
-serás
-será
-seremos
-seréis
-serán
-sería
-serías
-seríamos
-seríais
-serían
-era
-eras
-éramos
-erais
-eran
-fui
-fuiste
-fue
-fuimos
-fuisteis
-fueron
-fuera
-fueras
-fuéramos
-fuerais
-fueran
-fuese
-fueses
-fuésemos
-fueseis
-fuesen
-siendo
-sido
-  |  sed also means 'thirst'
-
-               | forms of tener, to have (not including the infinitive):
-tengo
-tienes
-tiene
-tenemos
-tenéis
-tienen
-tenga
-tengas
-tengamos
-tengáis
-tengan
-tendré
-tendrás
-tendrá
-tendremos
-tendréis
-tendrán
-tendría
-tendrías
-tendríamos
-tendríais
-tendrían
-tenía
-tenías
-teníamos
-teníais
-tenían
-tuve
-tuviste
-tuvo
-tuvimos
-tuvisteis
-tuvieron
-tuviera
-tuvieras
-tuviéramos
-tuvierais
-tuvieran
-tuviese
-tuvieses
-tuviésemos
-tuvieseis
-tuviesen
-teniendo
-tenido
-tenida
-tenidos
-tenidas
-tened
-

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stopwords_eu.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_eu.txt b/solr/example/example-DIH/solr/rss/conf/lang/stopwords_eu.txt
deleted file mode 100644
index 25f1db9..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_eu.txt
+++ /dev/null
@@ -1,99 +0,0 @@
-# example set of basque stopwords
-al
-anitz
-arabera
-asko
-baina
-bat
-batean
-batek
-bati
-batzuei
-batzuek
-batzuetan
-batzuk
-bera
-beraiek
-berau
-berauek
-bere
-berori
-beroriek
-beste
-bezala
-da
-dago
-dira
-ditu
-du
-dute
-edo
-egin
-ere
-eta
-eurak
-ez
-gainera
-gu
-gutxi
-guzti
-haiei
-haiek
-haietan
-hainbeste
-hala
-han
-handik
-hango
-hara
-hari
-hark
-hartan
-hau
-hauei
-hauek
-hauetan
-hemen
-hemendik
-hemengo
-hi
-hona
-honek
-honela
-honetan
-honi
-hor
-hori
-horiei
-horiek
-horietan
-horko
-horra
-horrek
-horrela
-horretan
-horri
-hortik
-hura
-izan
-ni
-noiz
-nola
-non
-nondik
-nongo
-nor
-nora
-ze
-zein
-zen
-zenbait
-zenbat
-zer
-zergatik
-ziren
-zituen
-zu
-zuek
-zuen
-zuten

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stopwords_fa.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_fa.txt b/solr/example/example-DIH/solr/rss/conf/lang/stopwords_fa.txt
deleted file mode 100644
index 723641c..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_fa.txt
+++ /dev/null
@@ -1,313 +0,0 @@
-# This file was created by Jacques Savoy and is distributed under the BSD license.
-# See http://members.unine.ch/jacques.savoy/clef/index.html.
-# Also see http://www.opensource.org/licenses/bsd-license.html
-# Note: by default this file is used after normalization, so when adding entries
-# to this file, use the arabic '\u064a' instead of '\u06cc'
-\u0627\u0646\u0627\u0646
-\u0646\u062f\u0627\u0634\u062a\u0647
-\u0633\u0631\u0627\u0633\u0631
-\u062e\u064a\u0627\u0647
-\u0627\u064a\u0634\u0627\u0646
-\u0648\u064a
-\u062a\u0627\u0643\u0646\u0648\u0646
-\u0628\u064a\u0634\u062a\u0631\u064a
-\u062f\u0648\u0645
-\u067e\u0633
-\u0646\u0627\u0634\u064a
-\u0648\u06af\u0648
-\u064a\u0627
-\u062f\u0627\u0634\u062a\u0646\u062f
-\u0633\u067e\u0633
-\u0647\u0646\u06af\u0627\u0645
-\u0647\u0631\u06af\u0632
-\u067e\u0646\u062c
-\u0646\u0634\u0627\u0646
-\u0627\u0645\u0633\u0627\u0644
-\u062f\u064a\u06af\u0631
-\u06af\u0631\u0648\u0647\u064a
-\u0634\u062f\u0646\u062f
-\u0686\u0637\u0648\u0631
-\u062f\u0647
-\u0648
-\u062f\u0648
-\u0646\u062e\u0633\u062a\u064a\u0646
-\u0648\u0644\u064a
-\u0686\u0631\u0627
-\u0686\u0647
-\u0648\u0633\u0637
-\u0647
-\u0643\u062f\u0627\u0645
-\u0642\u0627\u0628\u0644
-\u064a\u0643
-\u0631\u0641\u062a
-\u0647\u0641\u062a
-\u0647\u0645\u0686\u0646\u064a\u0646
-\u062f\u0631
-\u0647\u0632\u0627\u0631
-\u0628\u0644\u0647
-\u0628\u0644\u064a
-\u0634\u0627\u064a\u062f
-\u0627\u0645\u0627
-\u0634\u0646\u0627\u0633\u064a
-\u06af\u0631\u0641\u062a\u0647
-\u062f\u0647\u062f
-\u062f\u0627\u0634\u062a\u0647
-\u062f\u0627\u0646\u0633\u062a
-\u062f\u0627\u0634\u062a\u0646
-\u062e\u0648\u0627\u0647\u064a\u0645
-\u0645\u064a\u0644\u064a\u0627\u0631\u062f
-\u0648\u0642\u062a\u064a\u0643\u0647
-\u0627\u0645\u062f
-\u062e\u0648\u0627\u0647\u062f
-\u062c\u0632
-\u0627\u0648\u0631\u062f\u0647
-\u0634\u062f\u0647
-\u0628\u0644\u0643\u0647
-\u062e\u062f\u0645\u0627\u062a
-\u0634\u062f\u0646
-\u0628\u0631\u062e\u064a
-\u0646\u0628\u0648\u062f
-\u0628\u0633\u064a\u0627\u0631\u064a
-\u062c\u0644\u0648\u06af\u064a\u0631\u064a
-\u062d\u0642
-\u0643\u0631\u062f\u0646\u062f
-\u0646\u0648\u0639\u064a
-\u0628\u0639\u0631\u064a
-\u0646\u0643\u0631\u062f\u0647
-\u0646\u0638\u064a\u0631
-\u0646\u0628\u0627\u064a\u062f
-\u0628\u0648\u062f\u0647
-\u0628\u0648\u062f\u0646
-\u062f\u0627\u062f
-\u0627\u0648\u0631\u062f
-\u0647\u0633\u062a
-\u062c\u0627\u064a\u064a
-\u0634\u0648\u062f
-\u062f\u0646\u0628\u0627\u0644
-\u062f\u0627\u062f\u0647
-\u0628\u0627\u064a\u062f
-\u0633\u0627\u0628\u0642
-\u0647\u064a\u0686
-\u0647\u0645\u0627\u0646
-\u0627\u0646\u062c\u0627
-\u0643\u0645\u062a\u0631
-\u0643\u062c\u0627\u0633\u062a
-\u06af\u0631\u062f\u062f
-\u0643\u0633\u064a
-\u062a\u0631
-\u0645\u0631\u062f\u0645
-\u062a\u0627\u0646
-\u062f\u0627\u062f\u0646
-\u0628\u0648\u062f\u0646\u062f
-\u0633\u0631\u064a
-\u062c\u062f\u0627
-\u0646\u062f\u0627\u0631\u0646\u062f
-\u0645\u06af\u0631
-\u064a\u0643\u062f\u064a\u06af\u0631
-\u062f\u0627\u0631\u062f
-\u062f\u0647\u0646\u062f
-\u0628\u0646\u0627\u0628\u0631\u0627\u064a\u0646
-\u0647\u0646\u06af\u0627\u0645\u064a
-\u0633\u0645\u062a
-\u062c\u0627
-\u0627\u0646\u0686\u0647
-\u062e\u0648\u062f
-\u062f\u0627\u062f\u0646\u062f
-\u0632\u064a\u0627\u062f
-\u062f\u0627\u0631\u0646\u062f
-\u0627\u062b\u0631
-\u0628\u062f\u0648\u0646
-\u0628\u0647\u062a\u0631\u064a\u0646
-\u0628\u064a\u0634\u062a\u0631
-\u0627\u0644\u0628\u062a\u0647
-\u0628\u0647
-\u0628\u0631\u0627\u0633\u0627\u0633
-\u0628\u064a\u0631\u0648\u0646
-\u0643\u0631\u062f
-\u0628\u0639\u0636\u064a
-\u06af\u0631\u0641\u062a
-\u062a\u0648\u064a
-\u0627\u064a
-\u0645\u064a\u0644\u064a\u0648\u0646
-\u0627\u0648
-\u062c\u0631\u064a\u0627\u0646
-\u062a\u0648\u0644
-\u0628\u0631
-\u0645\u0627\u0646\u0646\u062f
-\u0628\u0631\u0627\u0628\u0631
-\u0628\u0627\u0634\u064a\u0645
-\u0645\u062f\u062a\u064a
-\u06af\u0648\u064a\u0646\u062f
-\u0627\u0643\u0646\u0648\u0646
-\u062a\u0627
-\u062a\u0646\u0647\u0627
-\u062c\u062f\u064a\u062f
-\u0686\u0646\u062f
-\u0628\u064a
-\u0646\u0634\u062f\u0647
-\u0643\u0631\u062f\u0646
-\u0643\u0631\u062f\u0645
-\u06af\u0648\u064a\u062f
-\u0643\u0631\u062f\u0647
-\u0643\u0646\u064a\u0645
-\u0646\u0645\u064a
-\u0646\u0632\u062f
-\u0631\u0648\u064a
-\u0642\u0635\u062f
-\u0641\u0642\u0637
-\u0628\u0627\u0644\u0627\u064a
-\u062f\u064a\u06af\u0631\u0627\u0646
-\u0627\u064a\u0646
-\u062f\u064a\u0631\u0648\u0632
-\u062a\u0648\u0633\u0637
-\u0633\u0648\u0645
-\u0627\u064a\u0645
-\u062f\u0627\u0646\u0646\u062f
-\u0633\u0648\u064a
-\u0627\u0633\u062a\u0641\u0627\u062f\u0647
-\u0634\u0645\u0627
-\u0643\u0646\u0627\u0631
-\u062f\u0627\u0631\u064a\u0645
-\u0633\u0627\u062e\u062a\u0647
-\u0637\u0648\u0631
-\u0627\u0645\u062f\u0647
-\u0631\u0641\u062a\u0647
-\u0646\u062e\u0633\u062a
-\u0628\u064a\u0633\u062a
-\u0646\u0632\u062f\u064a\u0643
-\u0637\u064a
-\u0643\u0646\u064a\u062f
-\u0627\u0632
-\u0627\u0646\u0647\u0627
-\u062a\u0645\u0627\u0645\u064a
-\u062f\u0627\u0634\u062a
-\u064a\u0643\u064a
-\u0637\u0631\u064a\u0642
-\u0627\u0634
-\u0686\u064a\u0633\u062a
-\u0631\u0648\u0628
-\u0646\u0645\u0627\u064a\u062f
-\u06af\u0641\u062a
-\u0686\u0646\u062f\u064a\u0646
-\u0686\u064a\u0632\u064a
-\u062a\u0648\u0627\u0646\u062f
-\u0627\u0645
-\u0627\u064a\u0627
-\u0628\u0627
-\u0627\u0646
-\u0627\u064a\u062f
-\u062a\u0631\u064a\u0646
-\u0627\u064a\u0646\u0643\u0647
-\u062f\u064a\u06af\u0631\u064a
-\u0631\u0627\u0647
-\u0647\u0627\u064a\u064a
-\u0628\u0631\u0648\u0632
-\u0647\u0645\u0686\u0646\u0627\u0646
-\u067e\u0627\u0639\u064a\u0646
-\u0643\u0633
-\u062d\u062f\u0648\u062f
-\u0645\u062e\u062a\u0644\u0641
-\u0645\u0642\u0627\u0628\u0644
-\u0686\u064a\u0632
-\u06af\u064a\u0631\u062f
-\u0646\u062f\u0627\u0631\u062f
-\u0636\u062f
-\u0647\u0645\u0686\u0648\u0646
-\u0633\u0627\u0632\u064a
-\u0634\u0627\u0646
-\u0645\u0648\u0631\u062f
-\u0628\u0627\u0631\u0647
-\u0645\u0631\u0633\u064a
-\u062e\u0648\u064a\u0634
-\u0628\u0631\u062e\u0648\u0631\u062f\u0627\u0631
-\u0686\u0648\u0646
-\u062e\u0627\u0631\u062c
-\u0634\u0634
-\u0647\u0646\u0648\u0632
-\u062a\u062d\u062a
-\u0636\u0645\u0646
-\u0647\u0633\u062a\u064a\u0645
-\u06af\u0641\u062a\u0647
-\u0641\u0643\u0631
-\u0628\u0633\u064a\u0627\u0631
-\u067e\u064a\u0634
-\u0628\u0631\u0627\u064a
-\u0631\u0648\u0632\u0647\u0627\u064a
-\u0627\u0646\u0643\u0647
-\u0646\u062e\u0648\u0627\u0647\u062f
-\u0628\u0627\u0644\u0627
-\u0643\u0644
-\u0648\u0642\u062a\u064a
-\u0643\u064a
-\u0686\u0646\u064a\u0646
-\u0643\u0647
-\u06af\u064a\u0631\u064a
-\u0646\u064a\u0633\u062a
-\u0627\u0633\u062a
-\u0643\u062c\u0627
-\u0643\u0646\u062f
-\u0646\u064a\u0632
-\u064a\u0627\u0628\u062f
-\u0628\u0646\u062f\u064a
-\u062d\u062a\u064a
-\u062a\u0648\u0627\u0646\u0646\u062f
-\u0639\u0642\u0628
-\u062e\u0648\u0627\u0633\u062a
-\u0643\u0646\u0646\u062f
-\u0628\u064a\u0646
-\u062a\u0645\u0627\u0645
-\u0647\u0645\u0647
-\u0645\u0627
-\u0628\u0627\u0634\u0646\u062f
-\u0645\u062b\u0644
-\u0634\u062f
-\u0627\u0631\u064a
-\u0628\u0627\u0634\u062f
-\u0627\u0631\u0647
-\u0637\u0628\u0642
-\u0628\u0639\u062f
-\u0627\u06af\u0631
-\u0635\u0648\u0631\u062a
-\u063a\u064a\u0631
-\u062c\u0627\u064a
-\u0628\u064a\u0634
-\u0631\u064a\u0632\u064a
-\u0627\u0646\u062f
-\u0632\u064a\u0631\u0627
-\u0686\u06af\u0648\u0646\u0647
-\u0628\u0627\u0631
-\u0644\u0637\u0641\u0627
-\u0645\u064a
-\u062f\u0631\u0628\u0627\u0631\u0647
-\u0645\u0646
-\u062f\u064a\u062f\u0647
-\u0647\u0645\u064a\u0646
-\u06af\u0630\u0627\u0631\u064a
-\u0628\u0631\u062f\u0627\u0631\u064a
-\u0639\u0644\u062a
-\u06af\u0630\u0627\u0634\u062a\u0647
-\u0647\u0645
-\u0641\u0648\u0642
-\u0646\u0647
-\u0647\u0627
-\u0634\u0648\u0646\u062f
-\u0627\u0628\u0627\u062f
-\u0647\u0645\u0648\u0627\u0631\u0647
-\u0647\u0631
-\u0627\u0648\u0644
-\u062e\u0648\u0627\u0647\u0646\u062f
-\u0686\u0647\u0627\u0631
-\u0646\u0627\u0645
-\u0627\u0645\u0631\u0648\u0632
-\u0645\u0627\u0646
-\u0647\u0627\u064a
-\u0642\u0628\u0644
-\u0643\u0646\u0645
-\u0633\u0639\u064a
-\u062a\u0627\u0632\u0647
-\u0631\u0627
-\u0647\u0633\u062a\u0646\u062f
-\u0632\u064a\u0631
-\u062c\u0644\u0648\u064a
-\u0639\u0646\u0648\u0627\u0646
-\u0628\u0648\u062f

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stopwords_fi.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_fi.txt b/solr/example/example-DIH/solr/rss/conf/lang/stopwords_fi.txt
deleted file mode 100644
index 4372c9a..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_fi.txt
+++ /dev/null
@@ -1,97 +0,0 @@
- | From svn.tartarus.org/snowball/trunk/website/algorithms/finnish/stop.txt
- | This file is distributed under the BSD License.
- | See http://snowball.tartarus.org/license.php
- | Also see http://www.opensource.org/licenses/bsd-license.html
- |  - Encoding was converted to UTF-8.
- |  - This notice was added.
- |
- | NOTE: To use this file with StopFilterFactory, you must specify format="snowball"
- 
-| forms of BE
-
-olla
-olen
-olet
-on
-olemme
-olette
-ovat
-ole        | negative form
-
-oli
-olisi
-olisit
-olisin
-olisimme
-olisitte
-olisivat
-olit
-olin
-olimme
-olitte
-olivat
-ollut
-olleet
-
-en         | negation
-et
-ei
-emme
-ette
-eiv�t
-
-|Nom   Gen    Acc    Part   Iness   Elat    Illat  Adess   Ablat   Allat   Ess    Trans
-min�   minun  minut  minua  minussa minusta minuun minulla minulta minulle               | I
-sin�   sinun  sinut  sinua  sinussa sinusta sinuun sinulla sinulta sinulle               | you
-h�n    h�nen  h�net  h�nt�  h�ness� h�nest� h�neen h�nell� h�nelt� h�nelle               | he she
-me     meid�n meid�t meit�  meiss�  meist�  meihin meill�  meilt�  meille                | we
-te     teid�n teid�t teit�  teiss�  teist�  teihin teill�  teilt�  teille                | you
-he     heid�n heid�t heit�  heiss�  heist�  heihin heill�  heilt�  heille                | they
-
-t�m�   t�m�n         t�t�   t�ss�   t�st�   t�h�n  tall�   t�lt�   t�lle   t�n�   t�ksi  | this
-tuo    tuon          tuot�  tuossa  tuosta  tuohon tuolla  tuolta  tuolle  tuona  tuoksi | that
-se     sen           sit�   siin�   siit�   siihen sill�   silt�   sille   sin�   siksi  | it
-n�m�   n�iden        n�it�  n�iss�  n�ist�  n�ihin n�ill�  n�ilt�  n�ille  n�in�  n�iksi | these
-nuo    noiden        noita  noissa  noista  noihin noilla  noilta  noille  noina  noiksi | those
-ne     niiden        niit�  niiss�  niist�  niihin niill�  niilt�  niille  niin�  niiksi | they
-
-kuka   kenen kenet   ket�   keness� kenest� keneen kenell� kenelt� kenelle kenen� keneksi| who
-ketk�  keiden ketk�  keit�  keiss�  keist�  keihin keill�  keilt�  keille  kein�  keiksi | (pl)
-mik�   mink� mink�   mit�   miss�   mist�   mihin  mill�   milt�   mille   min�   miksi  | which what
-mitk�                                                                                    | (pl)
-
-joka   jonka         jota   jossa   josta   johon  jolla   jolta   jolle   jona   joksi  | who which
-jotka  joiden        joita  joissa  joista  joihin joilla  joilta  joille  joina  joiksi | (pl)
-
-| conjunctions
-
-ett�   | that
-ja     | and
-jos    | if
-koska  | because
-kuin   | than
-mutta  | but
-niin   | so
-sek�   | and
-sill�  | for
-tai    | or
-vaan   | but
-vai    | or
-vaikka | although
-
-
-| prepositions
-
-kanssa  | with
-mukaan  | according to
-noin    | about
-poikki  | across
-yli     | over, across
-
-| other
-
-kun    | when
-niin   | so
-nyt    | now
-itse   | self
-

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stopwords_fr.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_fr.txt b/solr/example/example-DIH/solr/rss/conf/lang/stopwords_fr.txt
deleted file mode 100644
index 749abae..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_fr.txt
+++ /dev/null
@@ -1,186 +0,0 @@
- | From svn.tartarus.org/snowball/trunk/website/algorithms/french/stop.txt
- | This file is distributed under the BSD License.
- | See http://snowball.tartarus.org/license.php
- | Also see http://www.opensource.org/licenses/bsd-license.html
- |  - Encoding was converted to UTF-8.
- |  - This notice was added.
- |
- | NOTE: To use this file with StopFilterFactory, you must specify format="snowball"
-
- | A French stop word list. Comments begin with vertical bar. Each stop
- | word is at the start of a line.
-
-au             |  a + le
-aux            |  a + les
-avec           |  with
-ce             |  this
-ces            |  these
-dans           |  with
-de             |  of
-des            |  de + les
-du             |  de + le
-elle           |  she
-en             |  `of them' etc
-et             |  and
-eux            |  them
-il             |  he
-je             |  I
-la             |  the
-le             |  the
-leur           |  their
-lui            |  him
-ma             |  my (fem)
-mais           |  but
-me             |  me
-m�me           |  same; as in moi-m�me (myself) etc
-mes            |  me (pl)
-moi            |  me
-mon            |  my (masc)
-ne             |  not
-nos            |  our (pl)
-notre          |  our
-nous           |  we
-on             |  one
-ou             |  where
-par            |  by
-pas            |  not
-pour           |  for
-qu             |  que before vowel
-que            |  that
-qui            |  who
-sa             |  his, her (fem)
-se             |  oneself
-ses            |  his (pl)
-son            |  his, her (masc)
-sur            |  on
-ta             |  thy (fem)
-te             |  thee
-tes            |  thy (pl)
-toi            |  thee
-ton            |  thy (masc)
-tu             |  thou
-un             |  a
-une            |  a
-vos            |  your (pl)
-votre          |  your
-vous           |  you
-
-               |  single letter forms
-
-c              |  c'
-d              |  d'
-j              |  j'
-l              |  l'
-�              |  to, at
-m              |  m'
-n              |  n'
-s              |  s'
-t              |  t'
-y              |  there
-
-               | forms of �tre (not including the infinitive):
-�t�
-�t�e
-�t�es
-�t�s
-�tant
-suis
-es
-est
-sommes
-�tes
-sont
-serai
-seras
-sera
-serons
-serez
-seront
-serais
-serait
-serions
-seriez
-seraient
-�tais
-�tait
-�tions
-�tiez
-�taient
-fus
-fut
-f�mes
-f�tes
-furent
-sois
-soit
-soyons
-soyez
-soient
-fusse
-fusses
-f�t
-fussions
-fussiez
-fussent
-
-               | forms of avoir (not including the infinitive):
-ayant
-eu
-eue
-eues
-eus
-ai
-as
-avons
-avez
-ont
-aurai
-auras
-aura
-aurons
-aurez
-auront
-aurais
-aurait
-aurions
-auriez
-auraient
-avais
-avait
-avions
-aviez
-avaient
-eut
-e�mes
-e�tes
-eurent
-aie
-aies
-ait
-ayons
-ayez
-aient
-eusse
-eusses
-e�t
-eussions
-eussiez
-eussent
-
-               | Later additions (from Jean-Christophe Deschamps)
-ceci           |  this
-cela           |  that
-cel�           |  that
-cet            |  this
-cette          |  this
-ici            |  here
-ils            |  they
-les            |  the (pl)
-leurs          |  their (pl)
-quel           |  which
-quels          |  which
-quelle         |  which
-quelles        |  which
-sans           |  without
-soi            |  oneself
-

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stopwords_ga.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_ga.txt b/solr/example/example-DIH/solr/rss/conf/lang/stopwords_ga.txt
deleted file mode 100644
index 9ff88d7..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_ga.txt
+++ /dev/null
@@ -1,110 +0,0 @@
-
-a
-ach
-ag
-agus
-an
-aon
-ar
-arna
-as
-b'
-ba
-beirt
-bh�r
-caoga
-ceathair
-ceathrar
-chomh
-cht�
-chuig
-chun
-cois
-c�ad
-c�ig
-c�igear
-d'
-daichead
-dar
-de
-deich
-deichni�r
-den
-dh�
-do
-don
-dt�
-d�
-d�r
-d�
-faoi
-faoin
-faoina
-faoin�r
-fara
-fiche
-gach
-gan
-go
-gur
-haon
-hocht
-i
-iad
-idir
-in
-ina
-ins
-in�r
-is
-le
-leis
-lena
-len�r
-m'
-mar
-mo
-m�
-na
-nach
-naoi
-naon�r
-n�
-n�
-n�or
-n�
-n�cha
-ocht
-ochtar
-os
-roimh
-sa
-seacht
-seachtar
-seacht�
-seasca
-seisear
-siad
-sibh
-sinn
-sna
-s�
-s�
-tar
-thar
-th�
-tri�r
-tr�
-tr�na
-tr�n�r
-tr�ocha
-t�
-um
-�r
-�
-�is
-�
-�
-�n
-�na
-�n�r

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stopwords_gl.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_gl.txt b/solr/example/example-DIH/solr/rss/conf/lang/stopwords_gl.txt
deleted file mode 100644
index d8760b1..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_gl.txt
+++ /dev/null
@@ -1,161 +0,0 @@
-# galican stopwords
-a
-a�nda
-al�
-aquel
-aquela
-aquelas
-aqueles
-aquilo
-aqu�
-ao
-aos
-as
-as�
-�
-ben
-cando
-che
-co
-coa
-comigo
-con
-connosco
-contigo
-convosco
-coas
-cos
-cun
-cuns
-cunha
-cunhas
-da
-dalgunha
-dalgunhas
-dalg�n
-dalg�ns
-das
-de
-del
-dela
-delas
-deles
-desde
-deste
-do
-dos
-dun
-duns
-dunha
-dunhas
-e
-el
-ela
-elas
-eles
-en
-era
-eran
-esa
-esas
-ese
-eses
-esta
-estar
-estaba
-est�
-est�n
-este
-estes
-estiven
-estou
-eu
-�
-facer
-foi
-foron
-fun
-hab�a
-hai
-iso
-isto
-la
-las
-lle
-lles
-lo
-los
-mais
-me
-meu
-meus
-min
-mi�a
-mi�as
-moi
-na
-nas
-neste
-nin
-no
-non
-nos
-nosa
-nosas
-noso
-nosos
-n�s
-nun
-nunha
-nuns
-nunhas
-o
-os
-ou
-�
-�s
-para
-pero
-pode
-pois
-pola
-polas
-polo
-polos
-por
-que
-se
-sen�n
-ser
-seu
-seus
-sexa
-sido
-sobre
-s�a
-s�as
-tam�n
-tan
-te
-ten
-te�en
-te�o
-ter
-teu
-teus
-ti
-tido
-ti�a
-tiven
-t�a
-t�as
-un
-unha
-unhas
-uns
-vos
-vosa
-vosas
-voso
-vosos
-v�s

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stopwords_hi.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_hi.txt b/solr/example/example-DIH/solr/rss/conf/lang/stopwords_hi.txt
deleted file mode 100644
index 86286bb..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_hi.txt
+++ /dev/null
@@ -1,235 +0,0 @@
-# Also see http://www.opensource.org/licenses/bsd-license.html
-# See http://members.unine.ch/jacques.savoy/clef/index.html.
-# This file was created by Jacques Savoy and is distributed under the BSD license.
-# Note: by default this file also contains forms normalized by HindiNormalizer 
-# for spelling variation (see section below), such that it can be used whether or 
-# not you enable that feature. When adding additional entries to this list,
-# please add the normalized form as well. 
-\u0905\u0902\u0926\u0930
-\u0905\u0924
-\u0905\u092a\u0928\u093e
-\u0905\u092a\u0928\u0940
-\u0905\u092a\u0928\u0947
-\u0905\u092d\u0940
-\u0906\u0926\u093f
-\u0906\u092a
-\u0907\u0924\u094d\u092f\u093e\u0926\u093f
-\u0907\u0928 
-\u0907\u0928\u0915\u093e
-\u0907\u0928\u094d\u0939\u0940\u0902
-\u0907\u0928\u094d\u0939\u0947\u0902
-\u0907\u0928\u094d\u0939\u094b\u0902
-\u0907\u0938
-\u0907\u0938\u0915\u093e
-\u0907\u0938\u0915\u0940
-\u0907\u0938\u0915\u0947
-\u0907\u0938\u092e\u0947\u0902
-\u0907\u0938\u0940
-\u0907\u0938\u0947
-\u0909\u0928
-\u0909\u0928\u0915\u093e
-\u0909\u0928\u0915\u0940
-\u0909\u0928\u0915\u0947
-\u0909\u0928\u0915\u094b
-\u0909\u0928\u094d\u0939\u0940\u0902
-\u0909\u0928\u094d\u0939\u0947\u0902
-\u0909\u0928\u094d\u0939\u094b\u0902
-\u0909\u0938
-\u0909\u0938\u0915\u0947
-\u0909\u0938\u0940
-\u0909\u0938\u0947
-\u090f\u0915
-\u090f\u0935\u0902
-\u090f\u0938
-\u0910\u0938\u0947
-\u0914\u0930
-\u0915\u0908
-\u0915\u0930
-\u0915\u0930\u0924\u093e
-\u0915\u0930\u0924\u0947
-\u0915\u0930\u0928\u093e
-\u0915\u0930\u0928\u0947
-\u0915\u0930\u0947\u0902
-\u0915\u0939\u0924\u0947
-\u0915\u0939\u093e
-\u0915\u093e
-\u0915\u093e\u095e\u0940
-\u0915\u093f
-\u0915\u093f\u0924\u0928\u093e
-\u0915\u093f\u0928\u094d\u0939\u0947\u0902
-\u0915\u093f\u0928\u094d\u0939\u094b\u0902
-\u0915\u093f\u092f\u093e
-\u0915\u093f\u0930
-\u0915\u093f\u0938
-\u0915\u093f\u0938\u0940
-\u0915\u093f\u0938\u0947
-\u0915\u0940
-\u0915\u0941\u091b
-\u0915\u0941\u0932
-\u0915\u0947
-\u0915\u094b
-\u0915\u094b\u0908
-\u0915\u094c\u0928
-\u0915\u094c\u0928\u0938\u093e
-\u0917\u092f\u093e
-\u0918\u0930
-\u091c\u092c
-\u091c\u0939\u093e\u0901
-\u091c\u093e
-\u091c\u093f\u0924\u0928\u093e
-\u091c\u093f\u0928
-\u091c\u093f\u0928\u094d\u0939\u0947\u0902
-\u091c\u093f\u0928\u094d\u0939\u094b\u0902
-\u091c\u093f\u0938
-\u091c\u093f\u0938\u0947
-\u091c\u0940\u0927\u0930
-\u091c\u0948\u0938\u093e
-\u091c\u0948\u0938\u0947
-\u091c\u094b
-\u0924\u0915
-\u0924\u092c
-\u0924\u0930\u0939
-\u0924\u093f\u0928
-\u0924\u093f\u0928\u094d\u0939\u0947\u0902
-\u0924\u093f\u0928\u094d\u0939\u094b\u0902
-\u0924\u093f\u0938
-\u0924\u093f\u0938\u0947
-\u0924\u094b
-\u0925\u093e
-\u0925\u0940
-\u0925\u0947
-\u0926\u092c\u093e\u0930\u093e
-\u0926\u093f\u092f\u093e
-\u0926\u0941\u0938\u0930\u093e
-\u0926\u0942\u0938\u0930\u0947
-\u0926\u094b
-\u0926\u094d\u0935\u093e\u0930\u093e
-\u0928
-\u0928\u0939\u0940\u0902
-\u0928\u093e
-\u0928\u093f\u0939\u093e\u092f\u0924
-\u0928\u0940\u091a\u0947
-\u0928\u0947
-\u092a\u0930
-\u092a\u0930  
-\u092a\u0939\u0932\u0947
-\u092a\u0942\u0930\u093e
-\u092a\u0947
-\u092b\u093f\u0930
-\u092c\u0928\u0940
-\u092c\u0939\u0940
-\u092c\u0939\u0941\u0924
-\u092c\u093e\u0926
-\u092c\u093e\u0932\u093e
-\u092c\u093f\u0932\u0915\u0941\u0932
-\u092d\u0940
-\u092d\u0940\u0924\u0930
-\u092e\u0917\u0930
-\u092e\u093e\u0928\u094b
-\u092e\u0947
-\u092e\u0947\u0902
-\u092f\u0926\u093f
-\u092f\u0939
-\u092f\u0939\u093e\u0901
-\u092f\u0939\u0940
-\u092f\u093e
-\u092f\u093f\u0939 
-\u092f\u0947
-\u0930\u0916\u0947\u0902
-\u0930\u0939\u093e
-\u0930\u0939\u0947
-\u0931\u094d\u0935\u093e\u0938\u093e
-\u0932\u093f\u090f
-\u0932\u093f\u092f\u0947
-\u0932\u0947\u0915\u093f\u0928
-\u0935
-\u0935\u0930\u094d\u0917
-\u0935\u0939
-\u0935\u0939 
-\u0935\u0939\u093e\u0901
-\u0935\u0939\u0940\u0902
-\u0935\u093e\u0932\u0947
-\u0935\u0941\u0939 
-\u0935\u0947
-\u0935\u095a\u0948\u0930\u0939
-\u0938\u0902\u0917
-\u0938\u0915\u0924\u093e
-\u0938\u0915\u0924\u0947
-\u0938\u092c\u0938\u0947
-\u0938\u092d\u0940
-\u0938\u093e\u0925
-\u0938\u093e\u092c\u0941\u0924
-\u0938\u093e\u092d
-\u0938\u093e\u0930\u093e
-\u0938\u0947
-\u0938\u094b
-\u0939\u0940
-\u0939\u0941\u0906
-\u0939\u0941\u0908
-\u0939\u0941\u090f
-\u0939\u0948
-\u0939\u0948\u0902
-\u0939\u094b
-\u0939\u094b\u0924\u093e
-\u0939\u094b\u0924\u0940
-\u0939\u094b\u0924\u0947
-\u0939\u094b\u0928\u093e
-\u0939\u094b\u0928\u0947
-# additional normalized forms of the above
-\u0905\u092a\u0928\u093f
-\u091c\u0947\u0938\u0947
-\u0939\u094b\u0924\u093f
-\u0938\u092d\u093f
-\u0924\u093f\u0902\u0939\u094b\u0902
-\u0907\u0902\u0939\u094b\u0902
-\u0926\u0935\u093e\u0930\u093e
-\u0907\u0938\u093f
-\u0915\u093f\u0902\u0939\u0947\u0902
-\u0925\u093f
-\u0909\u0902\u0939\u094b\u0902
-\u0913\u0930
-\u091c\u093f\u0902\u0939\u0947\u0902
-\u0935\u0939\u093f\u0902
-\u0905\u092d\u093f
-\u092c\u0928\u093f
-\u0939\u093f
-\u0909\u0902\u0939\u093f\u0902
-\u0909\u0902\u0939\u0947\u0902
-\u0939\u0947\u0902
-\u0935\u0917\u0947\u0930\u0939
-\u090f\u0938\u0947
-\u0930\u0935\u093e\u0938\u093e
-\u0915\u094b\u0928
-\u0928\u093f\u091a\u0947
-\u0915\u093e\u092b\u093f
-\u0909\u0938\u093f
-\u092a\u0941\u0930\u093e
-\u092d\u093f\u0924\u0930
-\u0939\u0947
-\u092c\u0939\u093f
-\u0935\u0939\u093e\u0902
-\u0915\u094b\u0907
-\u092f\u0939\u093e\u0902
-\u091c\u093f\u0902\u0939\u094b\u0902
-\u0924\u093f\u0902\u0939\u0947\u0902
-\u0915\u093f\u0938\u093f
-\u0915\u0907
-\u092f\u0939\u093f
-\u0907\u0902\u0939\u093f\u0902
-\u091c\u093f\u0927\u0930
-\u0907\u0902\u0939\u0947\u0902
-\u0905\u0926\u093f
-\u0907\u0924\u092f\u093e\u0926\u093f
-\u0939\u0941\u0907
-\u0915\u094b\u0928\u0938\u093e
-\u0907\u0938\u0915\u093f
-\u0926\u0941\u0938\u0930\u0947
-\u091c\u0939\u093e\u0902
-\u0905\u092a
-\u0915\u093f\u0902\u0939\u094b\u0902
-\u0909\u0928\u0915\u093f
-\u092d\u093f
-\u0935\u0930\u0917
-\u0939\u0941\u0905
-\u091c\u0947\u0938\u093e
-\u0928\u0939\u093f\u0902

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stopwords_hu.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_hu.txt b/solr/example/example-DIH/solr/rss/conf/lang/stopwords_hu.txt
deleted file mode 100644
index 37526da..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_hu.txt
+++ /dev/null
@@ -1,211 +0,0 @@
- | From svn.tartarus.org/snowball/trunk/website/algorithms/hungarian/stop.txt
- | This file is distributed under the BSD License.
- | See http://snowball.tartarus.org/license.php
- | Also see http://www.opensource.org/licenses/bsd-license.html
- |  - Encoding was converted to UTF-8.
- |  - This notice was added.
- |
- | NOTE: To use this file with StopFilterFactory, you must specify format="snowball"
- 
-| Hungarian stop word list
-| prepared by Anna Tordai
-
-a
-ahogy
-ahol
-aki
-akik
-akkor
-alatt
-�ltal
-�ltal�ban
-amely
-amelyek
-amelyekben
-amelyeket
-amelyet
-amelynek
-ami
-amit
-amolyan
-am�g
-amikor
-�t
-abban
-ahhoz
-annak
-arra
-arr�l
-az
-azok
-azon
-azt
-azzal
-az�rt
-azt�n
-azut�n
-azonban
-b�r
-be
-bel�l
-benne
-cikk
-cikkek
-cikkeket
-csak
-de
-e
-eddig
-eg�sz
-egy
-egyes
-egyetlen
-egy�b
-egyik
-egyre
-ekkor
-el
-el�g
-ellen
-el\u0151
-el\u0151sz�r
-el\u0151tt
-els\u0151
-�n
-�ppen
-ebben
-ehhez
-emilyen
-ennek
-erre
-ez
-ezt
-ezek
-ezen
-ezzel
-ez�rt
-�s
-fel
-fel�
-hanem
-hiszen
-hogy
-hogyan
-igen
-�gy
-illetve
-ill.
-ill
-ilyen
-ilyenkor
-ison
-ism�t
-itt
-j�
-j�l
-jobban
-kell
-kellett
-kereszt�l
-keress�nk
-ki
-k�v�l
-k�z�tt
-k�z�l
-legal�bb
-lehet
-lehetett
-legyen
-lenne
-lenni
-lesz
-lett
-maga
-mag�t
-majd
-majd
-m�r
-m�s
-m�sik
-meg
-m�g
-mellett
-mert
-mely
-melyek
-mi
-mit
-m�g
-mi�rt
-milyen
-mikor
-minden
-mindent
-mindenki
-mindig
-mint
-mintha
-mivel
-most
-nagy
-nagyobb
-nagyon
-ne
-n�ha
-nekem
-neki
-nem
-n�h�ny
-n�lk�l
-nincs
-olyan
-ott
-�ssze
-\u0151
-\u0151k
-\u0151ket
-pedig
-persze
-r�
-s
-saj�t
-sem
-semmi
-sok
-sokat
-sokkal
-sz�m�ra
-szemben
-szerint
-szinte
-tal�n
-teh�t
-teljes
-tov�bb
-tov�bb�
-t�bb
-�gy
-ugyanis
-�j
-�jabb
-�jra
-ut�n
-ut�na
-utols�
-vagy
-vagyis
-valaki
-valami
-valamint
-val�
-vagyok
-van
-vannak
-volt
-voltam
-voltak
-voltunk
-vissza
-vele
-viszont
-volna

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stopwords_hy.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_hy.txt b/solr/example/example-DIH/solr/rss/conf/lang/stopwords_hy.txt
deleted file mode 100644
index 60c1c50..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_hy.txt
+++ /dev/null
@@ -1,46 +0,0 @@
-# example set of Armenian stopwords.
-\u0561\u0575\u0564
-\u0561\u0575\u056c
-\u0561\u0575\u0576
-\u0561\u0575\u057d
-\u0564\u0578\u0582
-\u0564\u0578\u0582\u0584
-\u0565\u0574
-\u0565\u0576
-\u0565\u0576\u0584
-\u0565\u057d
-\u0565\u0584
-\u0567
-\u0567\u056b
-\u0567\u056b\u0576
-\u0567\u056b\u0576\u0584
-\u0567\u056b\u0580
-\u0567\u056b\u0584
-\u0567\u0580
-\u0568\u057d\u057f
-\u0569
-\u056b
-\u056b\u0576
-\u056b\u057d\u056f
-\u056b\u0580
-\u056f\u0561\u0574
-\u0570\u0561\u0574\u0561\u0580
-\u0570\u0565\u057f
-\u0570\u0565\u057f\u0578
-\u0574\u0565\u0576\u0584
-\u0574\u0565\u057b
-\u0574\u056b
-\u0576
-\u0576\u0561
-\u0576\u0561\u0587
-\u0576\u0580\u0561
-\u0576\u0580\u0561\u0576\u0584
-\u0578\u0580
-\u0578\u0580\u0568
-\u0578\u0580\u0578\u0576\u0584
-\u0578\u0580\u057a\u0565\u057d
-\u0578\u0582
-\u0578\u0582\u0574
-\u057a\u056b\u057f\u056b
-\u057e\u0580\u0561
-\u0587

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stopwords_id.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_id.txt b/solr/example/example-DIH/solr/rss/conf/lang/stopwords_id.txt
deleted file mode 100644
index 4617f83..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_id.txt
+++ /dev/null
@@ -1,359 +0,0 @@
-# from appendix D of: A Study of Stemming Effects on Information
-# Retrieval in Bahasa Indonesia
-ada
-adanya
-adalah
-adapun
-agak
-agaknya
-agar
-akan
-akankah
-akhirnya
-aku
-akulah
-amat
-amatlah
-anda
-andalah
-antar
-diantaranya
-antara
-antaranya
-diantara
-apa
-apaan
-mengapa
-apabila
-apakah
-apalagi
-apatah
-atau
-ataukah
-ataupun
-bagai
-bagaikan
-sebagai
-sebagainya
-bagaimana
-bagaimanapun
-sebagaimana
-bagaimanakah
-bagi
-bahkan
-bahwa
-bahwasanya
-sebaliknya
-banyak
-sebanyak
-beberapa
-seberapa
-begini
-beginian
-beginikah
-beginilah
-sebegini
-begitu
-begitukah
-begitulah
-begitupun
-sebegitu
-belum
-belumlah
-sebelum
-sebelumnya
-sebenarnya
-berapa
-berapakah
-berapalah
-berapapun
-betulkah
-sebetulnya
-biasa
-biasanya
-bila
-bilakah
-bisa
-bisakah
-sebisanya
-boleh
-bolehkah
-bolehlah
-buat
-bukan
-bukankah
-bukanlah
-bukannya
-cuma
-percuma
-dahulu
-dalam
-dan
-dapat
-dari
-daripada
-dekat
-demi
-demikian
-demikianlah
-sedemikian
-dengan
-depan
-di
-dia
-dialah
-dini
-diri
-dirinya
-terdiri
-dong
-dulu
-enggak
-enggaknya
-entah
-entahlah
-terhadap
-terhadapnya
-hal
-hampir
-hanya
-hanyalah
-harus
-haruslah
-harusnya
-seharusnya
-hendak
-hendaklah
-hendaknya
-hingga
-sehingga
-ia
-ialah
-ibarat
-ingin
-inginkah
-inginkan
-ini
-inikah
-inilah
-itu
-itukah
-itulah
-jangan
-jangankan
-janganlah
-jika
-jikalau
-juga
-justru
-kala
-kalau
-kalaulah
-kalaupun
-kalian
-kami
-kamilah
-kamu
-kamulah
-kan
-kapan
-kapankah
-kapanpun
-dikarenakan
-karena
-karenanya
-ke
-kecil
-kemudian
-kenapa
-kepada
-kepadanya
-ketika
-seketika
-khususnya
-kini
-kinilah
-kiranya
-sekiranya
-kita
-kitalah
-kok
-lagi
-lagian
-selagi
-lah
-lain
-lainnya
-melainkan
-selaku
-lalu
-melalui
-terlalu
-lama
-lamanya
-selama
-selama
-selamanya
-lebih
-terlebih
-bermacam
-macam
-semacam
-maka
-makanya
-makin
-malah
-malahan
-mampu
-mampukah
-mana
-manakala
-manalagi
-masih
-masihkah
-semasih
-masing
-mau
-maupun
-semaunya
-memang
-mereka
-merekalah
-meski
-meskipun
-semula
-mungkin
-mungkinkah
-nah
-namun
-nanti
-nantinya
-nyaris
-oleh
-olehnya
-seorang
-seseorang
-pada
-padanya
-padahal
-paling
-sepanjang
-pantas
-sepantasnya
-sepantasnyalah
-para
-pasti
-pastilah
-per
-pernah
-pula
-pun
-merupakan
-rupanya
-serupa
-saat
-saatnya
-sesaat
-saja
-sajalah
-saling
-bersama
-sama
-sesama
-sambil
-sampai
-sana
-sangat
-sangatlah
-saya
-sayalah
-se
-sebab
-sebabnya
-sebuah
-tersebut
-tersebutlah
-sedang
-sedangkan
-sedikit
-sedikitnya
-segala
-segalanya
-segera
-sesegera
-sejak
-sejenak
-sekali
-sekalian
-sekalipun
-sesekali
-sekaligus
-sekarang
-sekarang
-sekitar
-sekitarnya
-sela
-selain
-selalu
-seluruh
-seluruhnya
-semakin
-sementara
-sempat
-semua
-semuanya
-sendiri
-sendirinya
-seolah
-seperti
-sepertinya
-sering
-seringnya
-serta
-siapa
-siapakah
-siapapun
-disini
-disinilah
-sini
-sinilah
-sesuatu
-sesuatunya
-suatu
-sesudah
-sesudahnya
-sudah
-sudahkah
-sudahlah
-supaya
-tadi
-tadinya
-tak
-tanpa
-setelah
-telah
-tentang
-tentu
-tentulah
-tentunya
-tertentu
-seterusnya
-tapi
-tetapi
-setiap
-tiap
-setidaknya
-tidak
-tidakkah
-tidaklah
-toh
-waduh
-wah
-wahai
-sewaktu
-walau
-walaupun
-wong
-yaitu
-yakni
-yang

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stopwords_it.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_it.txt b/solr/example/example-DIH/solr/rss/conf/lang/stopwords_it.txt
deleted file mode 100644
index 1219cc7..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_it.txt
+++ /dev/null
@@ -1,303 +0,0 @@
- | From svn.tartarus.org/snowball/trunk/website/algorithms/italian/stop.txt
- | This file is distributed under the BSD License.
- | See http://snowball.tartarus.org/license.php
- | Also see http://www.opensource.org/licenses/bsd-license.html
- |  - Encoding was converted to UTF-8.
- |  - This notice was added.
- |
- | NOTE: To use this file with StopFilterFactory, you must specify format="snowball"
-
- | An Italian stop word list. Comments begin with vertical bar. Each stop
- | word is at the start of a line.
-
-ad             |  a (to) before vowel
-al             |  a + il
-allo           |  a + lo
-ai             |  a + i
-agli           |  a + gli
-all            |  a + l'
-agl            |  a + gl'
-alla           |  a + la
-alle           |  a + le
-con            |  with
-col            |  con + il
-coi            |  con + i (forms collo, cogli etc are now very rare)
-da             |  from
-dal            |  da + il
-dallo          |  da + lo
-dai            |  da + i
-dagli          |  da + gli
-dall           |  da + l'
-dagl           |  da + gll'
-dalla          |  da + la
-dalle          |  da + le
-di             |  of
-del            |  di + il
-dello          |  di + lo
-dei            |  di + i
-degli          |  di + gli
-dell           |  di + l'
-degl           |  di + gl'
-della          |  di + la
-delle          |  di + le
-in             |  in
-nel            |  in + el
-nello          |  in + lo
-nei            |  in + i
-negli          |  in + gli
-nell           |  in + l'
-negl           |  in + gl'
-nella          |  in + la
-nelle          |  in + le
-su             |  on
-sul            |  su + il
-sullo          |  su + lo
-sui            |  su + i
-sugli          |  su + gli
-sull           |  su + l'
-sugl           |  su + gl'
-sulla          |  su + la
-sulle          |  su + le
-per            |  through, by
-tra            |  among
-contro         |  against
-io             |  I
-tu             |  thou
-lui            |  he
-lei            |  she
-noi            |  we
-voi            |  you
-loro           |  they
-mio            |  my
-mia            |
-miei           |
-mie            |
-tuo            |
-tua            |
-tuoi           |  thy
-tue            |
-suo            |
-sua            |
-suoi           |  his, her
-sue            |
-nostro         |  our
-nostra         |
-nostri         |
-nostre         |
-vostro         |  your
-vostra         |
-vostri         |
-vostre         |
-mi             |  me
-ti             |  thee
-ci             |  us, there
-vi             |  you, there
-lo             |  him, the
-la             |  her, the
-li             |  them
-le             |  them, the
-gli            |  to him, the
-ne             |  from there etc
-il             |  the
-un             |  a
-uno            |  a
-una            |  a
-ma             |  but
-ed             |  and
-se             |  if
-perch�         |  why, because
-anche          |  also
-come           |  how
-dov            |  where (as dov')
-dove           |  where
-che            |  who, that
-chi            |  who
-cui            |  whom
-non            |  not
-pi�            |  more
-quale          |  who, that
-quanto         |  how much
-quanti         |
-quanta         |
-quante         |
-quello         |  that
-quelli         |
-quella         |
-quelle         |
-questo         |  this
-questi         |
-questa         |
-queste         |
-si             |  yes
-tutto          |  all
-tutti          |  all
-
-               |  single letter forms:
-
-a              |  at
-c              |  as c' for ce or ci
-e              |  and
-i              |  the
-l              |  as l'
-o              |  or
-
-               | forms of avere, to have (not including the infinitive):
-
-ho
-hai
-ha
-abbiamo
-avete
-hanno
-abbia
-abbiate
-abbiano
-avr�
-avrai
-avr�
-avremo
-avrete
-avranno
-avrei
-avresti
-avrebbe
-avremmo
-avreste
-avrebbero
-avevo
-avevi
-aveva
-avevamo
-avevate
-avevano
-ebbi
-avesti
-ebbe
-avemmo
-aveste
-ebbero
-avessi
-avesse
-avessimo
-avessero
-avendo
-avuto
-avuta
-avuti
-avute
-
-               | forms of essere, to be (not including the infinitive):
-sono
-sei
-�
-siamo
-siete
-sia
-siate
-siano
-sar�
-sarai
-sar�
-saremo
-sarete
-saranno
-sarei
-saresti
-sarebbe
-saremmo
-sareste
-sarebbero
-ero
-eri
-era
-eravamo
-eravate
-erano
-fui
-fosti
-fu
-fummo
-foste
-furono
-fossi
-fosse
-fossimo
-fossero
-essendo
-
-               | forms of fare, to do (not including the infinitive, fa, fat-):
-faccio
-fai
-facciamo
-fanno
-faccia
-facciate
-facciano
-far�
-farai
-far�
-faremo
-farete
-faranno
-farei
-faresti
-farebbe
-faremmo
-fareste
-farebbero
-facevo
-facevi
-faceva
-facevamo
-facevate
-facevano
-feci
-facesti
-fece
-facemmo
-faceste
-fecero
-facessi
-facesse
-facessimo
-facessero
-facendo
-
-               | forms of stare, to be (not including the infinitive):
-sto
-stai
-sta
-stiamo
-stanno
-stia
-stiate
-stiano
-star�
-starai
-star�
-staremo
-starete
-staranno
-starei
-staresti
-starebbe
-staremmo
-stareste
-starebbero
-stavo
-stavi
-stava
-stavamo
-stavate
-stavano
-stetti
-stesti
-stette
-stemmo
-steste
-stettero
-stessi
-stesse
-stessimo
-stessero
-stando

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stopwords_ja.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_ja.txt b/solr/example/example-DIH/solr/rss/conf/lang/stopwords_ja.txt
deleted file mode 100644
index d4321be..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_ja.txt
+++ /dev/null
@@ -1,127 +0,0 @@
-#
-# This file defines a stopword set for Japanese.
-#
-# This set is made up of hand-picked frequent terms from segmented Japanese Wikipedia.
-# Punctuation characters and frequent kanji have mostly been left out.  See LUCENE-3745
-# for frequency lists, etc. that can be useful for making your own set (if desired)
-#
-# Note that there is an overlap between these stopwords and the terms stopped when used
-# in combination with the JapanesePartOfSpeechStopFilter.  When editing this file, note
-# that comments are not allowed on the same line as stopwords.
-#
-# Also note that stopping is done in a case-insensitive manner.  Change your StopFilter
-# configuration if you need case-sensitive stopping.  Lastly, note that stopping is done
-# using the same character width as the entries in this file.  Since this StopFilter is
-# normally done after a CJKWidthFilter in your chain, you would usually want your romaji
-# entries to be in half-width and your kana entries to be in full-width.
-#
-\u306e
-\u306b
-\u306f
-\u3092
-\u305f
-\u304c
-\u3067
-\u3066
-\u3068
-\u3057
-\u308c
-\u3055
-\u3042\u308b
-\u3044\u308b
-\u3082
-\u3059\u308b
-\u304b\u3089
-\u306a
-\u3053\u3068
-\u3068\u3057\u3066
-\u3044
-\u3084
-\u308c\u308b
-\u306a\u3069
-\u306a\u3063
-\u306a\u3044
-\u3053\u306e
-\u305f\u3081
-\u305d\u306e
-\u3042\u3063
-\u3088\u3046
-\u307e\u305f
-\u3082\u306e
-\u3068\u3044\u3046
-\u3042\u308a
-\u307e\u3067
-\u3089\u308c
-\u306a\u308b
-\u3078
-\u304b
-\u3060
-\u3053\u308c
-\u306b\u3088\u3063\u3066
-\u306b\u3088\u308a
-\u304a\u308a
-\u3088\u308a
-\u306b\u3088\u308b
-\u305a
-\u306a\u308a
-\u3089\u308c\u308b
-\u306b\u304a\u3044\u3066
-\u3070
-\u306a\u304b\u3063
-\u306a\u304f
-\u3057\u304b\u3057
-\u306b\u3064\u3044\u3066
-\u305b
-\u3060\u3063
-\u305d\u306e\u5f8c
-\u3067\u304d\u308b
-\u305d\u308c
-\u3046
-\u306e\u3067
-\u306a\u304a
-\u306e\u307f
-\u3067\u304d
-\u304d
-\u3064
-\u306b\u304a\u3051\u308b
-\u304a\u3088\u3073
-\u3044\u3046
-\u3055\u3089\u306b
-\u3067\u3082
-\u3089
-\u305f\u308a
-\u305d\u306e\u4ed6
-\u306b\u95a2\u3059\u308b
-\u305f\u3061
-\u307e\u3059
-\u3093
-\u306a\u3089
-\u306b\u5bfe\u3057\u3066
-\u7279\u306b
-\u305b\u308b
-\u53ca\u3073
-\u3053\u308c\u3089
-\u3068\u304d
-\u3067\u306f
-\u306b\u3066
-\u307b\u304b
-\u306a\u304c\u3089
-\u3046\u3061
-\u305d\u3057\u3066
-\u3068\u3068\u3082\u306b
-\u305f\u3060\u3057
-\u304b\u3064\u3066
-\u305d\u308c\u305e\u308c
-\u307e\u305f\u306f
-\u304a
-\u307b\u3069
-\u3082\u306e\u306e
-\u306b\u5bfe\u3059\u308b
-\u307b\u3068\u3093\u3069
-\u3068\u5171\u306b
-\u3068\u3044\u3063\u305f
-\u3067\u3059
-\u3068\u3082
-\u3068\u3053\u308d
-\u3053\u3053
-##### End of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stopwords_lv.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_lv.txt b/solr/example/example-DIH/solr/rss/conf/lang/stopwords_lv.txt
deleted file mode 100644
index e21a23c..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_lv.txt
+++ /dev/null
@@ -1,172 +0,0 @@
-# Set of Latvian stopwords from A Stemming Algorithm for Latvian, Karlis Kreslins
-# the original list of over 800 forms was refined: 
-#   pronouns, adverbs, interjections were removed
-# 
-# prepositions
-aiz
-ap
-ar
-apak\u0161
-\u0101rpus
-aug\u0161pus
-bez
-caur
-d\u0113\u013c
-gar
-iek\u0161
-iz
-kop\u0161
-labad
-lejpus
-l\u012bdz
-no
-otrpus
-pa
-par
-p\u0101r
-p\u0113c
-pie
-pirms
-pret
-priek\u0161
-starp
-\u0161aipus
-uz
-vi\u0146pus
-virs
-virspus
-zem
-apak\u0161pus
-# Conjunctions
-un
-bet
-jo
-ja
-ka
-lai
-tom\u0113r
-tikko
-turpret\u012b
-ar\u012b
-kaut
-gan
-t\u0101d\u0113\u013c
-t\u0101
-ne
-tikvien
-vien
-k\u0101
-ir
-te
-vai
-kam\u0113r
-# Particles
-ar
-diezin
-dro\u0161i
-diem\u017e\u0113l
-neb\u016bt
-ik
-it
-ta\u010du
-nu
-pat
-tiklab
-iek\u0161pus
-nedz
-tik
-nevis
-turpretim
-jeb
-iekam
-iek\u0101m
-iek\u0101ms
-kol\u012bdz
-l\u012bdzko
-tikl\u012bdz
-jeb\u0161u
-t\u0101lab
-t\u0101p\u0113c
-nek\u0101
-itin
-j\u0101
-jau
-jel
-n\u0113
-nezin
-tad
-tikai
-vis
-tak
-iekams
-vien
-# modal verbs
-b\u016bt  
-biju 
-biji
-bija
-bij\u0101m
-bij\u0101t
-esmu
-esi
-esam
-esat 
-b\u016b\u0161u     
-b\u016bsi
-b\u016bs
-b\u016bsim
-b\u016bsiet
-tikt
-tiku
-tiki
-tika
-tik\u0101m
-tik\u0101t
-tieku
-tiec
-tiek
-tiekam
-tiekat
-tik\u0161u
-tiks
-tiksim
-tiksiet
-tapt
-tapi
-tap\u0101t
-topat
-tap\u0161u
-tapsi
-taps
-tapsim
-tapsiet
-k\u013c\u016bt
-k\u013cuvu
-k\u013cuvi
-k\u013cuva
-k\u013cuv\u0101m
-k\u013cuv\u0101t
-k\u013c\u016bstu
-k\u013c\u016bsti
-k\u013c\u016bst
-k\u013c\u016bstam
-k\u013c\u016bstat
-k\u013c\u016b\u0161u
-k\u013c\u016bsi
-k\u013c\u016bs
-k\u013c\u016bsim
-k\u013c\u016bsiet
-# verbs
-var\u0113t
-var\u0113ju
-var\u0113j\u0101m
-var\u0113\u0161u
-var\u0113sim
-var
-var\u0113ji
-var\u0113j\u0101t
-var\u0113si
-var\u0113siet
-varat
-var\u0113ja
-var\u0113s

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stopwords_nl.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_nl.txt b/solr/example/example-DIH/solr/rss/conf/lang/stopwords_nl.txt
deleted file mode 100644
index 47a2aea..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_nl.txt
+++ /dev/null
@@ -1,119 +0,0 @@
- | From svn.tartarus.org/snowball/trunk/website/algorithms/dutch/stop.txt
- | This file is distributed under the BSD License.
- | See http://snowball.tartarus.org/license.php
- | Also see http://www.opensource.org/licenses/bsd-license.html
- |  - Encoding was converted to UTF-8.
- |  - This notice was added.
- |
- | NOTE: To use this file with StopFilterFactory, you must specify format="snowball"
-
- | A Dutch stop word list. Comments begin with vertical bar. Each stop
- | word is at the start of a line.
-
- | This is a ranked list (commonest to rarest) of stopwords derived from
- | a large sample of Dutch text.
-
- | Dutch stop words frequently exhibit homonym clashes. These are indicated
- | clearly below.
-
-de             |  the
-en             |  and
-van            |  of, from
-ik             |  I, the ego
-te             |  (1) chez, at etc, (2) to, (3) too
-dat            |  that, which
-die            |  that, those, who, which
-in             |  in, inside
-een            |  a, an, one
-hij            |  he
-het            |  the, it
-niet           |  not, nothing, naught
-zijn           |  (1) to be, being, (2) his, one's, its
-is             |  is
-was            |  (1) was, past tense of all persons sing. of 'zijn' (to be) (2) wax, (3) the washing, (4) rise of river
-op             |  on, upon, at, in, up, used up
-aan            |  on, upon, to (as dative)
-met            |  with, by
-als            |  like, such as, when
-voor           |  (1) before, in front of, (2) furrow
-had            |  had, past tense all persons sing. of 'hebben' (have)
-er             |  there
-maar           |  but, only
-om             |  round, about, for etc
-hem            |  him
-dan            |  then
-zou            |  should/would, past tense all persons sing. of 'zullen'
-of             |  or, whether, if
-wat            |  what, something, anything
-mijn           |  possessive and noun 'mine'
-men            |  people, 'one'
-dit            |  this
-zo             |  so, thus, in this way
-door           |  through by
-over           |  over, across
-ze             |  she, her, they, them
-zich           |  oneself
-bij            |  (1) a bee, (2) by, near, at
-ook            |  also, too
-tot            |  till, until
-je             |  you
-mij            |  me
-uit            |  out of, from
-der            |  Old Dutch form of 'van der' still found in surnames
-daar           |  (1) there, (2) because
-haar           |  (1) her, their, them, (2) hair
-naar           |  (1) unpleasant, unwell etc, (2) towards, (3) as
-heb            |  present first person sing. of 'to have'
-hoe            |  how, why
-heeft          |  present third person sing. of 'to have'
-hebben         |  'to have' and various parts thereof
-deze           |  this
-u              |  you
-want           |  (1) for, (2) mitten, (3) rigging
-nog            |  yet, still
-zal            |  'shall', first and third person sing. of verb 'zullen' (will)
-me             |  me
-zij            |  she, they
-nu             |  now
-ge             |  'thou', still used in Belgium and south Netherlands
-geen           |  none
-omdat          |  because
-iets           |  something, somewhat
-worden         |  to become, grow, get
-toch           |  yet, still
-al             |  all, every, each
-waren          |  (1) 'were' (2) to wander, (3) wares, (3)
-veel           |  much, many
-meer           |  (1) more, (2) lake
-doen           |  to do, to make
-toen           |  then, when
-moet           |  noun 'spot/mote' and present form of 'to must'
-ben            |  (1) am, (2) 'are' in interrogative second person singular of 'to be'
-zonder         |  without
-kan            |  noun 'can' and present form of 'to be able'
-hun            |  their, them
-dus            |  so, consequently
-alles          |  all, everything, anything
-onder          |  under, beneath
-ja             |  yes, of course
-eens           |  once, one day
-hier           |  here
-wie            |  who
-werd           |  imperfect third person sing. of 'become'
-altijd         |  always
-doch           |  yet, but etc
-wordt          |  present third person sing. of 'become'
-wezen          |  (1) to be, (2) 'been' as in 'been fishing', (3) orphans
-kunnen         |  to be able
-ons            |  us/our
-zelf           |  self
-tegen          |  against, towards, at
-na             |  after, near
-reeds          |  already
-wil            |  (1) present tense of 'want', (2) 'will', noun, (3) fender
-kon            |  could; past tense of 'to be able'
-niets          |  nothing
-uw             |  your
-iemand         |  somebody
-geweest        |  been; past participle of 'be'
-andere         |  other

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stopwords_no.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_no.txt b/solr/example/example-DIH/solr/rss/conf/lang/stopwords_no.txt
deleted file mode 100644
index a7a2c28..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_no.txt
+++ /dev/null
@@ -1,194 +0,0 @@
- | From svn.tartarus.org/snowball/trunk/website/algorithms/norwegian/stop.txt
- | This file is distributed under the BSD License.
- | See http://snowball.tartarus.org/license.php
- | Also see http://www.opensource.org/licenses/bsd-license.html
- |  - Encoding was converted to UTF-8.
- |  - This notice was added.
- |
- | NOTE: To use this file with StopFilterFactory, you must specify format="snowball"
-
- | A Norwegian stop word list. Comments begin with vertical bar. Each stop
- | word is at the start of a line.
-
- | This stop word list is for the dominant bokmål dialect. Words unique
- | to nynorsk are marked *.
-
- | Revised by Jan Bruusgaard <Ja...@ssb.no>, Jan 2005
-
-og             | and
-i              | in
-jeg            | I
-det            | it/this/that
-at             | to (w. inf.)
-en             | a/an
-et             | a/an
-den            | it/this/that
-til            | to
-er             | is/am/are
-som            | who/that
-p�             | on
-de             | they / you(formal)
-med            | with
-han            | he
-av             | of
-ikke           | not
-ikkje          | not *
-der            | there
-s�             | so
-var            | was/were
-meg            | me
-seg            | you
-men            | but
-ett            | one
-har            | have
-om             | about
-vi             | we
-min            | my
-mitt           | my
-ha             | have
-hadde          | had
-hun            | she
-n�             | now
-over           | over
-da             | when/as
-ved            | by/know
-fra            | from
-du             | you
-ut             | out
-sin            | your
-dem            | them
-oss            | us
-opp            | up
-man            | you/one
-kan            | can
-hans           | his
-hvor           | where
-eller          | or
-hva            | what
-skal           | shall/must
-selv           | self (reflective)
-sj�l           | self (reflective)
-her            | here
-alle           | all
-vil            | will
-bli            | become
-ble            | became
-blei           | became *
-blitt          | have become
-kunne          | could
-inn            | in
-n�r            | when
-v�re           | be
-kom            | come
-noen           | some
-noe            | some
-ville          | would
-dere           | you
-som            | who/which/that
-deres          | their/theirs
-kun            | only/just
-ja             | yes
-etter          | after
-ned            | down
-skulle         | should
-denne          | this
-for            | for/because
-deg            | you
-si             | hers/his
-sine           | hers/his
-sitt           | hers/his
-mot            | against
-�              | to
-meget          | much
-hvorfor        | why
-dette          | this
-disse          | these/those
-uten           | without
-hvordan        | how
-ingen          | none
-din            | your
-ditt           | your
-blir           | become
-samme          | same
-hvilken        | which
-hvilke         | which (plural)
-s�nn           | such a
-inni           | inside/within
-mellom         | between
-v�r            | our
-hver           | each
-hvem           | who
-vors           | us/ours
-hvis           | whose
-b�de           | both
-bare           | only/just
-enn            | than
-fordi          | as/because
-f�r            | before
-mange          | many
-ogs�           | also
-slik           | just
-v�rt           | been
-v�re           | to be
-b�e            | both *
-begge          | both
-siden          | since
-dykk           | your *
-dykkar         | yours *
-dei            | they *
-deira          | them *
-deires         | theirs *
-deim           | them *
-di             | your (fem.) *
-d�             | as/when *
-eg             | I *
-ein            | a/an *
-eit            | a/an *
-eitt           | a/an *
-elles          | or *
-honom          | he *
-hj�            | at *
-ho             | she *
-hoe            | she *
-henne          | her
-hennar         | her/hers
-hennes         | hers
-hoss           | how *
-hossen         | how *
-ikkje          | not *
-ingi           | noone *
-inkje          | noone *
-korleis        | how *
-korso          | how *
-kva            | what/which *
-kvar           | where *
-kvarhelst      | where *
-kven           | who/whom *
-kvi            | why *
-kvifor         | why *
-me             | we *
-medan          | while *
-mi             | my *
-mine           | my *
-mykje          | much *
-no             | now *
-nokon          | some (masc./neut.) *
-noka           | some (fem.) *
-nokor          | some *
-noko           | some *
-nokre          | some *
-si             | his/hers *
-sia            | since *
-sidan          | since *
-so             | so *
-somt           | some *
-somme          | some *
-um             | about*
-upp            | up *
-vere           | be *
-vore           | was *
-verte          | become *
-vort           | become *
-varte          | became *
-vart           | became *
-

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stopwords_pt.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_pt.txt b/solr/example/example-DIH/solr/rss/conf/lang/stopwords_pt.txt
deleted file mode 100644
index acfeb01..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_pt.txt
+++ /dev/null
@@ -1,253 +0,0 @@
- | From svn.tartarus.org/snowball/trunk/website/algorithms/portuguese/stop.txt
- | This file is distributed under the BSD License.
- | See http://snowball.tartarus.org/license.php
- | Also see http://www.opensource.org/licenses/bsd-license.html
- |  - Encoding was converted to UTF-8.
- |  - This notice was added.
- |
- | NOTE: To use this file with StopFilterFactory, you must specify format="snowball"
-
- | A Portuguese stop word list. Comments begin with vertical bar. Each stop
- | word is at the start of a line.
-
-
- | The following is a ranked list (commonest to rarest) of stopwords
- | deriving from a large sample of text.
-
- | Extra words have been added at the end.
-
-de             |  of, from
-a              |  the; to, at; her
-o              |  the; him
-que            |  who, that
-e              |  and
-do             |  de + o
-da             |  de + a
-em             |  in
-um             |  a
-para           |  for
-  | é          from SER
-com            |  with
-n�o            |  not, no
-uma            |  a
-os             |  the; them
-no             |  em + o
-se             |  himself etc
-na             |  em + a
-por            |  for
-mais           |  more
-as             |  the; them
-dos            |  de + os
-como           |  as, like
-mas            |  but
-  | foi        from SER
-ao             |  a + o
-ele            |  he
-das            |  de + as
-  | tem        from TER
-�              |  a + a
-seu            |  his
-sua            |  her
-ou             |  or
-  | ser        from SER
-quando         |  when
-muito          |  much
-  | há         from HAV
-nos            |  em + os; us
-j�             |  already, now
-  | está       from EST
-eu             |  I
-tamb�m         |  also
-s�             |  only, just
-pelo           |  per + o
-pela           |  per + a
-at�            |  up to
-isso           |  that
-ela            |  he
-entre          |  between
-  | era        from SER
-depois         |  after
-sem            |  without
-mesmo          |  same
-aos            |  a + os
-  | ter        from TER
-seus           |  his
-quem           |  whom
-nas            |  em + as
-me             |  me
-esse           |  that
-eles           |  they
-  | est�o      from EST
-voc�           |  you
-  | tinha      from TER
-  | foram      from SER
-essa           |  that
-num            |  em + um
-nem            |  nor
-suas           |  her
-meu            |  my
-�s             |  a + as
-minha          |  my
-  | t�m        from TER
-numa           |  em + uma
-pelos          |  per + os
-elas           |  they
-  | havia      from HAV
-  | seja       from SER
-qual           |  which
-  | ser�       from SER
-n�s            |  we
-  | tenho      from TER
-lhe            |  to him, her
-deles          |  of them
-essas          |  those
-esses          |  those
-pelas          |  per + as
-este           |  this
-  | fosse      from SER
-dele           |  of him
-
- | other words. There are many contractions such as naquele = em+aquele,
- | mo = me+o, but they are rare.
- | Indefinite article plural forms are also rare.
-
-tu             |  thou
-te             |  thee
-voc�s          |  you (plural)
-vos            |  you
-lhes           |  to them
-meus           |  my
-minhas
-teu            |  thy
-tua
-teus
-tuas
-nosso          | our
-nossa
-nossos
-nossas
-
-dela           |  of her
-delas          |  of them
-
-esta           |  this
-estes          |  these
-estas          |  these
-aquele         |  that
-aquela         |  that
-aqueles        |  those
-aquelas        |  those
-isto           |  this
-aquilo         |  that
-
-               | forms of estar, to be (not including the infinitive):
-estou
-est�
-estamos
-est�o
-estive
-esteve
-estivemos
-estiveram
-estava
-est�vamos
-estavam
-estivera
-estiv�ramos
-esteja
-estejamos
-estejam
-estivesse
-estiv�ssemos
-estivessem
-estiver
-estivermos
-estiverem
-
-               | forms of haver, to have (not including the infinitive):
-hei
-h�
-havemos
-h�o
-houve
-houvemos
-houveram
-houvera
-houv�ramos
-haja
-hajamos
-hajam
-houvesse
-houv�ssemos
-houvessem
-houver
-houvermos
-houverem
-houverei
-houver�
-houveremos
-houver�o
-houveria
-houver�amos
-houveriam
-
-               | forms of ser, to be (not including the infinitive):
-sou
-somos
-s�o
-era
-�ramos
-eram
-fui
-foi
-fomos
-foram
-fora
-f�ramos
-seja
-sejamos
-sejam
-fosse
-f�ssemos
-fossem
-for
-formos
-forem
-serei
-ser�
-seremos
-ser�o
-seria
-ser�amos
-seriam
-
-               | forms of ter, to have (not including the infinitive):
-tenho
-tem
-temos
-t�m
-tinha
-t�nhamos
-tinham
-tive
-teve
-tivemos
-tiveram
-tivera
-tiv�ramos
-tenha
-tenhamos
-tenham
-tivesse
-tiv�ssemos
-tivessem
-tiver
-tivermos
-tiverem
-terei
-ter�
-teremos
-ter�o
-teria
-ter�amos
-teriam

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stopwords_ro.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_ro.txt b/solr/example/example-DIH/solr/rss/conf/lang/stopwords_ro.txt
deleted file mode 100644
index 4fdee90..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_ro.txt
+++ /dev/null
@@ -1,233 +0,0 @@
-# This file was created by Jacques Savoy and is distributed under the BSD license.
-# See http://members.unine.ch/jacques.savoy/clef/index.html.
-# Also see http://www.opensource.org/licenses/bsd-license.html
-acea
-aceasta
-aceast\u0103
-aceea
-acei
-aceia
-acel
-acela
-acele
-acelea
-acest
-acesta
-aceste
-acestea
-ace\u015fti
-ace\u015ftia
-acolo
-acum
-ai
-aia
-aib\u0103
-aici
-al
-\u0103la
-ale
-alea
-\u0103lea
-altceva
-altcineva
-am
-ar
-are
-a\u015f
-a\u015fadar
-asemenea
-asta
-\u0103sta
-ast\u0103zi
-astea
-\u0103stea
-\u0103\u015ftia
-asupra
-a\u0163i
-au
-avea
-avem
-ave\u0163i
-azi
-bine
-bucur
-bun\u0103
-ca
-c\u0103
-c\u0103ci
-c�nd
-care
-c\u0103rei
-c\u0103ror
-c\u0103rui
-c�t
-c�te
-c�\u0163i
-c\u0103tre
-c�tva
-ce
-cel
-ceva
-chiar
-c�nd
-cine
-cineva
-c�t
-c�te
-c�\u0163i
-c�tva
-contra
-cu
-cum
-cumva
-cur�nd
-cur�nd
-da
-d\u0103
-dac\u0103
-dar
-datorit\u0103
-de
-deci
-deja
-deoarece
-departe
-de\u015fi
-din
-dinaintea
-dintr
-dintre
-drept
-dup\u0103
-ea
-ei
-el
-ele
-eram
-este
-e\u015fti
-eu
-face
-f\u0103r\u0103
-fi
-fie
-fiecare
-fii
-fim
-fi\u0163i
-iar
-ieri
-�i
-�l
-�mi
-�mpotriva
-�n 
-�nainte
-�naintea
-�nc�t
-�nc�t
-�ncotro
-�ntre
-�ntruc�t
-�ntruc�t
-�\u0163i
-la
-l�ng\u0103
-le
-li
-l�ng\u0103
-lor
-lui
-m\u0103
-m�ine
-mea
-mei
-mele
-mereu
-meu
-mi
-mine
-mult
-mult\u0103
-mul\u0163i
-ne
-nic\u0103ieri
-nici
-nimeni
-ni\u015fte
-noastr\u0103
-noastre
-noi
-no\u015ftri
-nostru
-nu
-ori
-oric�nd
-oricare
-oric�t
-orice
-oric�nd
-oricine
-oric�t
-oricum
-oriunde
-p�n\u0103
-pe
-pentru
-peste
-p�n\u0103
-poate
-pot
-prea
-prima
-primul
-prin
-printr
-sa
-s\u0103
-s\u0103i
-sale
-sau
-s\u0103u
-se
-\u015fi
-s�nt
-s�ntem
-s�nte\u0163i
-spre
-sub
-sunt
-suntem
-sunte\u0163i
-ta
-t\u0103i
-tale
-t\u0103u
-te
-\u0163i
-\u0163ie
-tine
-toat\u0103
-toate
-tot
-to\u0163i
-totu\u015fi
-tu
-un
-una
-unde
-undeva
-unei
-unele
-uneori
-unor
-v\u0103
-vi
-voastr\u0103
-voastre
-voi
-vo\u015ftri
-vostru
-vou\u0103
-vreo
-vreun

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stopwords_ru.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_ru.txt b/solr/example/example-DIH/solr/rss/conf/lang/stopwords_ru.txt
deleted file mode 100644
index 5527140..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_ru.txt
+++ /dev/null
@@ -1,243 +0,0 @@
- | From svn.tartarus.org/snowball/trunk/website/algorithms/russian/stop.txt
- | This file is distributed under the BSD License.
- | See http://snowball.tartarus.org/license.php
- | Also see http://www.opensource.org/licenses/bsd-license.html
- |  - Encoding was converted to UTF-8.
- |  - This notice was added.
- |
- | NOTE: To use this file with StopFilterFactory, you must specify format="snowball"
-
- | a russian stop word list. comments begin with vertical bar. each stop
- | word is at the start of a line.
-
- | this is a ranked list (commonest to rarest) of stopwords derived from
- | a large text sample.
-
- | letter `\u0451' is translated to `\u0435'.
-
-\u0438              | and
-\u0432              | in/into
-\u0432\u043e             | alternative form
-\u043d\u0435             | not
-\u0447\u0442\u043e            | what/that
-\u043e\u043d             | he
-\u043d\u0430             | on/onto
-\u044f              | i
-\u0441              | from
-\u0441\u043e             | alternative form
-\u043a\u0430\u043a            | how
-\u0430              | milder form of `no' (but)
-\u0442\u043e             | conjunction and form of `that'
-\u0432\u0441\u0435            | all
-\u043e\u043d\u0430            | she
-\u0442\u0430\u043a            | so, thus
-\u0435\u0433\u043e            | him
-\u043d\u043e             | but
-\u0434\u0430             | yes/and
-\u0442\u044b             | thou
-\u043a              | towards, by
-\u0443              | around, chez
-\u0436\u0435             | intensifier particle
-\u0432\u044b             | you
-\u0437\u0430             | beyond, behind
-\u0431\u044b             | conditional/subj. particle
-\u043f\u043e             | up to, along
-\u0442\u043e\u043b\u044c\u043a\u043e         | only
-\u0435\u0435             | her
-\u043c\u043d\u0435            | to me
-\u0431\u044b\u043b\u043e           | it was
-\u0432\u043e\u0442            | here is/are, particle
-\u043e\u0442             | away from
-\u043c\u0435\u043d\u044f           | me
-\u0435\u0449\u0435            | still, yet, more
-\u043d\u0435\u0442            | no, there isnt/arent
-\u043e              | about
-\u0438\u0437             | out of
-\u0435\u043c\u0443            | to him
-\u0442\u0435\u043f\u0435\u0440\u044c         | now
-\u043a\u043e\u0433\u0434\u0430          | when
-\u0434\u0430\u0436\u0435           | even
-\u043d\u0443             | so, well
-\u0432\u0434\u0440\u0443\u0433          | suddenly
-\u043b\u0438             | interrogative particle
-\u0435\u0441\u043b\u0438           | if
-\u0443\u0436\u0435            | already, but homonym of `narrower'
-\u0438\u043b\u0438            | or
-\u043d\u0438             | neither
-\u0431\u044b\u0442\u044c           | to be
-\u0431\u044b\u043b            | he was
-\u043d\u0435\u0433\u043e           | prepositional form of \u0435\u0433\u043e
-\u0434\u043e             | up to
-\u0432\u0430\u0441            | you accusative
-\u043d\u0438\u0431\u0443\u0434\u044c         | indef. suffix preceded by hyphen
-\u043e\u043f\u044f\u0442\u044c          | again
-\u0443\u0436             | already, but homonym of `adder'
-\u0432\u0430\u043c            | to you
-\u0441\u043a\u0430\u0437\u0430\u043b         | he said
-\u0432\u0435\u0434\u044c           | particle `after all'
-\u0442\u0430\u043c            | there
-\u043f\u043e\u0442\u043e\u043c          | then
-\u0441\u0435\u0431\u044f           | oneself
-\u043d\u0438\u0447\u0435\u0433\u043e         | nothing
-\u0435\u0439             | to her
-\u043c\u043e\u0436\u0435\u0442          | usually with `\u0431\u044b\u0442\u044c' as `maybe'
-\u043e\u043d\u0438            | they
-\u0442\u0443\u0442            | here
-\u0433\u0434\u0435            | where
-\u0435\u0441\u0442\u044c           | there is/are
-\u043d\u0430\u0434\u043e           | got to, must
-\u043d\u0435\u0439            | prepositional form of  \u0435\u0439
-\u0434\u043b\u044f            | for
-\u043c\u044b             | we
-\u0442\u0435\u0431\u044f           | thee
-\u0438\u0445             | them, their
-\u0447\u0435\u043c            | than
-\u0431\u044b\u043b\u0430           | she was
-\u0441\u0430\u043c            | self
-\u0447\u0442\u043e\u0431           | in order to
-\u0431\u0435\u0437            | without
-\u0431\u0443\u0434\u0442\u043e          | as if
-\u0447\u0435\u043b\u043e\u0432\u0435\u043a        | man, person, one
-\u0447\u0435\u0433\u043e           | genitive form of `what'
-\u0440\u0430\u0437            | once
-\u0442\u043e\u0436\u0435           | also
-\u0441\u0435\u0431\u0435           | to oneself
-\u043f\u043e\u0434            | beneath
-\u0436\u0438\u0437\u043d\u044c          | life
-\u0431\u0443\u0434\u0435\u0442          | will be
-\u0436              | short form of intensifer particle `\u0436\u0435'
-\u0442\u043e\u0433\u0434\u0430          | then
-\u043a\u0442\u043e            | who
-\u044d\u0442\u043e\u0442           | this
-\u0433\u043e\u0432\u043e\u0440\u0438\u043b        | was saying
-\u0442\u043e\u0433\u043e           | genitive form of `that'
-\u043f\u043e\u0442\u043e\u043c\u0443         | for that reason
-\u044d\u0442\u043e\u0433\u043e          | genitive form of `this'
-\u043a\u0430\u043a\u043e\u0439          | which
-\u0441\u043e\u0432\u0441\u0435\u043c         | altogether
-\u043d\u0438\u043c            | prepositional form of `\u0435\u0433\u043e', `\u043e\u043d\u0438'
-\u0437\u0434\u0435\u0441\u044c          | here
-\u044d\u0442\u043e\u043c           | prepositional form of `\u044d\u0442\u043e\u0442'
-\u043e\u0434\u0438\u043d           | one
-\u043f\u043e\u0447\u0442\u0438          | almost
-\u043c\u043e\u0439            | my
-\u0442\u0435\u043c            | instrumental/dative plural of `\u0442\u043e\u0442', `\u0442\u043e'
-\u0447\u0442\u043e\u0431\u044b          | full form of `in order that'
-\u043d\u0435\u0435            | her (acc.)
-\u043a\u0430\u0436\u0435\u0442\u0441\u044f        | it seems
-\u0441\u0435\u0439\u0447\u0430\u0441         | now
-\u0431\u044b\u043b\u0438           | they were
-\u043a\u0443\u0434\u0430           | where to
-\u0437\u0430\u0447\u0435\u043c          | why
-\u0441\u043a\u0430\u0437\u0430\u0442\u044c        | to say
-\u0432\u0441\u0435\u0445           | all (acc., gen. preposn. plural)
-\u043d\u0438\u043a\u043e\u0433\u0434\u0430        | never
-\u0441\u0435\u0433\u043e\u0434\u043d\u044f        | today
-\u043c\u043e\u0436\u043d\u043e          | possible, one can
-\u043f\u0440\u0438            | by
-\u043d\u0430\u043a\u043e\u043d\u0435\u0446        | finally
-\u0434\u0432\u0430            | two
-\u043e\u0431             | alternative form of `\u043e', about
-\u0434\u0440\u0443\u0433\u043e\u0439         | another
-\u0445\u043e\u0442\u044c           | even
-\u043f\u043e\u0441\u043b\u0435          | after
-\u043d\u0430\u0434            | above
-\u0431\u043e\u043b\u044c\u0448\u0435         | more
-\u0442\u043e\u0442            | that one (masc.)
-\u0447\u0435\u0440\u0435\u0437          | across, in
-\u044d\u0442\u0438            | these
-\u043d\u0430\u0441            | us
-\u043f\u0440\u043e            | about
-\u0432\u0441\u0435\u0433\u043e          | in all, only, of all
-\u043d\u0438\u0445            | prepositional form of `\u043e\u043d\u0438' (they)
-\u043a\u0430\u043a\u0430\u044f          | which, feminine
-\u043c\u043d\u043e\u0433\u043e          | lots
-\u0440\u0430\u0437\u0432\u0435          | interrogative particle
-\u0441\u043a\u0430\u0437\u0430\u043b\u0430        | she said
-\u0442\u0440\u0438            | three
-\u044d\u0442\u0443            | this, acc. fem. sing.
-\u043c\u043e\u044f            | my, feminine
-\u0432\u043f\u0440\u043e\u0447\u0435\u043c        | moreover, besides
-\u0445\u043e\u0440\u043e\u0448\u043e         | good
-\u0441\u0432\u043e\u044e           | ones own, acc. fem. sing.
-\u044d\u0442\u043e\u0439           | oblique form of `\u044d\u0442\u0430', fem. `this'
-\u043f\u0435\u0440\u0435\u0434          | in front of
-\u0438\u043d\u043e\u0433\u0434\u0430         | sometimes
-\u043b\u0443\u0447\u0448\u0435          | better
-\u0447\u0443\u0442\u044c           | a little
-\u0442\u043e\u043c            | preposn. form of `that one'
-\u043d\u0435\u043b\u044c\u0437\u044f         | one must not
-\u0442\u0430\u043a\u043e\u0439          | such a one
-\u0438\u043c             | to them
-\u0431\u043e\u043b\u0435\u0435          | more
-\u0432\u0441\u0435\u0433\u0434\u0430         | always
-\u043a\u043e\u043d\u0435\u0447\u043d\u043e        | of course
-\u0432\u0441\u044e            | acc. fem. sing of `all'
-\u043c\u0435\u0436\u0434\u0443          | between
-
-
-  | b: some paradigms
-  |
-  | personal pronouns
-  |
-  | \u044f  \u043c\u0435\u043d\u044f  \u043c\u043d\u0435  \u043c\u043d\u043e\u0439  [\u043c\u043d\u043e\u044e]
-  | \u0442\u044b  \u0442\u0435\u0431\u044f  \u0442\u0435\u0431\u0435  \u0442\u043e\u0431\u043e\u0439  [\u0442\u043e\u0431\u043e\u044e]
-  | \u043e\u043d  \u0435\u0433\u043e  \u0435\u043c\u0443  \u0438\u043c  [\u043d\u0435\u0433\u043e, \u043d\u0435\u043c\u0443, \u043d\u0438\u043c]
-  | \u043e\u043d\u0430  \u0435\u0435  \u044d\u0438  \u0435\u044e  [\u043d\u0435\u0435, \u043d\u044d\u0438, \u043d\u0435\u044e]
-  | \u043e\u043d\u043e  \u0435\u0433\u043e  \u0435\u043c\u0443  \u0438\u043c  [\u043d\u0435\u0433\u043e, \u043d\u0435\u043c\u0443, \u043d\u0438\u043c]
-  |
-  | \u043c\u044b  \u043d\u0430\u0441  \u043d\u0430\u043c  \u043d\u0430\u043c\u0438
-  | \u0432\u044b  \u0432\u0430\u0441  \u0432\u0430\u043c  \u0432\u0430\u043c\u0438
-  | \u043e\u043d\u0438  \u0438\u0445  \u0438\u043c  \u0438\u043c\u0438  [\u043d\u0438\u0445, \u043d\u0438\u043c, \u043d\u0438\u043c\u0438]
-  |
-  |   \u0441\u0435\u0431\u044f  \u0441\u0435\u0431\u0435  \u0441\u043e\u0431\u043e\u0439   [\u0441\u043e\u0431\u043e\u044e]
-  |
-  | demonstrative pronouns: \u044d\u0442\u043e\u0442 (this), \u0442\u043e\u0442 (that)
-  |
-  | \u044d\u0442\u043e\u0442  \u044d\u0442\u0430  \u044d\u0442\u043e  \u044d\u0442\u0438
-  | \u044d\u0442\u043e\u0433\u043e  \u044d\u0442\u044b  \u044d\u0442\u043e  \u044d\u0442\u0438
-  | \u044d\u0442\u043e\u0433\u043e  \u044d\u0442\u043e\u0439  \u044d\u0442\u043e\u0433\u043e  \u044d\u0442\u0438\u0445
-  | \u044d\u0442\u043e\u043c\u0443  \u044d\u0442\u043e\u0439  \u044d\u0442\u043e\u043c\u0443  \u044d\u0442\u0438\u043c
-  | \u044d\u0442\u0438\u043c  \u044d\u0442\u043e\u0439  \u044d\u0442\u0438\u043c  [\u044d\u0442\u043e\u044e]  \u044d\u0442\u0438\u043c\u0438
-  | \u044d\u0442\u043e\u043c  \u044d\u0442\u043e\u0439  \u044d\u0442\u043e\u043c  \u044d\u0442\u0438\u0445
-  |
-  | \u0442\u043e\u0442  \u0442\u0430  \u0442\u043e  \u0442\u0435
-  | \u0442\u043e\u0433\u043e  \u0442\u0443  \u0442\u043e  \u0442\u0435
-  | \u0442\u043e\u0433\u043e  \u0442\u043e\u0439  \u0442\u043e\u0433\u043e  \u0442\u0435\u0445
-  | \u0442\u043e\u043c\u0443  \u0442\u043e\u0439  \u0442\u043e\u043c\u0443  \u0442\u0435\u043c
-  | \u0442\u0435\u043c  \u0442\u043e\u0439  \u0442\u0435\u043c  [\u0442\u043e\u044e]  \u0442\u0435\u043c\u0438
-  | \u0442\u043e\u043c  \u0442\u043e\u0439  \u0442\u043e\u043c  \u0442\u0435\u0445
-  |
-  | determinative pronouns
-  |
-  | (a) \u0432\u0435\u0441\u044c (all)
-  |
-  | \u0432\u0435\u0441\u044c  \u0432\u0441\u044f  \u0432\u0441\u0435  \u0432\u0441\u0435
-  | \u0432\u0441\u0435\u0433\u043e  \u0432\u0441\u044e  \u0432\u0441\u0435  \u0432\u0441\u0435
-  | \u0432\u0441\u0435\u0433\u043e  \u0432\u0441\u0435\u0439  \u0432\u0441\u0435\u0433\u043e  \u0432\u0441\u0435\u0445
-  | \u0432\u0441\u0435\u043c\u0443  \u0432\u0441\u0435\u0439  \u0432\u0441\u0435\u043c\u0443  \u0432\u0441\u0435\u043c
-  | \u0432\u0441\u0435\u043c  \u0432\u0441\u0435\u0439  \u0432\u0441\u0435\u043c  [\u0432\u0441\u0435\u044e]  \u0432\u0441\u0435\u043c\u0438
-  | \u0432\u0441\u0435\u043c  \u0432\u0441\u0435\u0439  \u0432\u0441\u0435\u043c  \u0432\u0441\u0435\u0445
-  |
-  | (b) \u0441\u0430\u043c (himself etc)
-  |
-  | \u0441\u0430\u043c  \u0441\u0430\u043c\u0430  \u0441\u0430\u043c\u043e  \u0441\u0430\u043c\u0438
-  | \u0441\u0430\u043c\u043e\u0433\u043e \u0441\u0430\u043c\u0443  \u0441\u0430\u043c\u043e  \u0441\u0430\u043c\u0438\u0445
-  | \u0441\u0430\u043c\u043e\u0433\u043e \u0441\u0430\u043c\u043e\u0439 \u0441\u0430\u043c\u043e\u0433\u043e  \u0441\u0430\u043c\u0438\u0445
-  | \u0441\u0430\u043c\u043e\u043c\u0443 \u0441\u0430\u043c\u043e\u0439 \u0441\u0430\u043c\u043e\u043c\u0443  \u0441\u0430\u043c\u0438\u043c
-  | \u0441\u0430\u043c\u0438\u043c  \u0441\u0430\u043c\u043e\u0439  \u0441\u0430\u043c\u0438\u043c  [\u0441\u0430\u043c\u043e\u044e]  \u0441\u0430\u043c\u0438\u043c\u0438
-  | \u0441\u0430\u043c\u043e\u043c \u0441\u0430\u043c\u043e\u0439 \u0441\u0430\u043c\u043e\u043c  \u0441\u0430\u043c\u0438\u0445
-  |
-  | stems of verbs `to be', `to have', `to do' and modal
-  |
-  | \u0431\u044b\u0442\u044c  \u0431\u044b  \u0431\u0443\u0434  \u0431\u044b\u0432  \u0435\u0441\u0442\u044c  \u0441\u0443\u0442\u044c
-  | \u0438\u043c\u0435
-  | \u0434\u0435\u043b
-  | \u043c\u043e\u0433   \u043c\u043e\u0436  \u043c\u043e\u0447\u044c
-  | \u0443\u043c\u0435
-  | \u0445\u043e\u0447  \u0445\u043e\u0442
-  | \u0434\u043e\u043b\u0436
-  | \u043c\u043e\u0436\u043d
-  | \u043d\u0443\u0436\u043d
-  | \u043d\u0435\u043b\u044c\u0437\u044f
-

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stopwords_sv.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_sv.txt b/solr/example/example-DIH/solr/rss/conf/lang/stopwords_sv.txt
deleted file mode 100644
index 096f87f..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_sv.txt
+++ /dev/null
@@ -1,133 +0,0 @@
- | From svn.tartarus.org/snowball/trunk/website/algorithms/swedish/stop.txt
- | This file is distributed under the BSD License.
- | See http://snowball.tartarus.org/license.php
- | Also see http://www.opensource.org/licenses/bsd-license.html
- |  - Encoding was converted to UTF-8.
- |  - This notice was added.
- |
- | NOTE: To use this file with StopFilterFactory, you must specify format="snowball"
-
- | A Swedish stop word list. Comments begin with vertical bar. Each stop
- | word is at the start of a line.
-
- | This is a ranked list (commonest to rarest) of stopwords derived from
- | a large text sample.
-
- | Swedish stop words occasionally exhibit homonym clashes. For example
-  |  så = so, but also seed. These are indicated clearly below.
-
-och            | and
-det            | it, this/that
-att            | to (with infinitive)
-i              | in, at
-en             | a
-jag            | I
-hon            | she
-som            | who, that
-han            | he
-på             | on
-den            | it, this/that
-med            | with
-var            | where, each
-sig            | him(self) etc
-för            | for
-så             | so (also: seed)
-till           | to
-är             | is
-men            | but
-ett            | a
-om             | if; around, about
-hade           | had
-de             | they, these/those
-av             | of
-icke           | not, no
-mig            | me
-du             | you
-henne          | her
-då             | then, when
-sin            | his
-nu             | now
-har            | have
-inte           | inte n�gon = no one
-hans           | his
-honom          | him
-skulle         | 'sake'
-hennes         | her
-där            | there
-min            | my
-man            | one (pronoun)
-ej             | nor
-vid            | at, by, on (also: vast)
-kunde          | could
-något          | some etc
-från           | from, off
-ut             | out
-när            | when
-efter          | after, behind
-upp            | up
-vi             | we
-dem            | them
-vara           | be
-vad            | what
-över           | over
-än             | than
-dig            | you
-kan            | can
-sina           | his
-här            | here
-ha             | have
-mot            | towards
-alla           | all
-under          | under (also: wonder)
-någon          | some etc
-eller          | or (else)
-allt           | all
-mycket         | much
-sedan          | since
-ju             | why
-denna          | this/that
-själv          | myself, yourself etc
-detta          | this/that
-åt             | to
-utan           | without
-varit          | was
-hur            | how
-ingen          | no
-mitt           | my
-ni             | you
-bli            | to be, become
-blev           | from bli
-oss            | us
-din            | thy
-dessa          | these/those
-några          | some etc
-deras          | their
-blir           | from bli
-mina           | my
-samma          | (the) same
-vilken         | who, that
-er             | you, your
-sådan          | such a
-vår            | our
-blivit         | from bli
-dess           | its
-inom           | within
-mellan         | between
-sådant         | such a
-varför         | why
-varje          | each
-vilka          | who, that
-ditt           | thy
-vem            | who
-vilket         | who, that
-sitta          | his
-sådana         | such a
-vart           | each
-dina           | thy
-vars           | whose
-vårt           | our
-våra           | our
-ert            | your
-era            | your
-vilkas         | whose
-

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stopwords_th.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_th.txt b/solr/example/example-DIH/solr/rss/conf/lang/stopwords_th.txt
deleted file mode 100644
index 07f0fab..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_th.txt
+++ /dev/null
@@ -1,119 +0,0 @@
-# Thai stopwords from:
-# "Opinion Detection in Thai Political News Columns
-# Based on Subjectivity Analysis"
-# Khampol Sukhum, Supot Nitsuwat, and Choochart Haruechaiyasak
-\u0e44\u0e27\u0e49
-\u0e44\u0e21\u0e48
-\u0e44\u0e1b
-\u0e44\u0e14\u0e49
-\u0e43\u0e2b\u0e49
-\u0e43\u0e19
-\u0e42\u0e14\u0e22
-\u0e41\u0e2b\u0e48\u0e07
-\u0e41\u0e25\u0e49\u0e27
-\u0e41\u0e25\u0e30
-\u0e41\u0e23\u0e01
-\u0e41\u0e1a\u0e1a
-\u0e41\u0e15\u0e48
-\u0e40\u0e2d\u0e07
-\u0e40\u0e2b\u0e47\u0e19
-\u0e40\u0e25\u0e22
-\u0e40\u0e23\u0e34\u0e48\u0e21
-\u0e40\u0e23\u0e32
-\u0e40\u0e21\u0e37\u0e48\u0e2d
-\u0e40\u0e1e\u0e37\u0e48\u0e2d
-\u0e40\u0e1e\u0e23\u0e32\u0e30
-\u0e40\u0e1b\u0e47\u0e19\u0e01\u0e32\u0e23
-\u0e40\u0e1b\u0e47\u0e19
-\u0e40\u0e1b\u0e34\u0e14\u0e40\u0e1c\u0e22
-\u0e40\u0e1b\u0e34\u0e14
-\u0e40\u0e19\u0e37\u0e48\u0e2d\u0e07\u0e08\u0e32\u0e01
-\u0e40\u0e14\u0e35\u0e22\u0e27\u0e01\u0e31\u0e19
-\u0e40\u0e14\u0e35\u0e22\u0e27
-\u0e40\u0e0a\u0e48\u0e19
-\u0e40\u0e09\u0e1e\u0e32\u0e30
-\u0e40\u0e04\u0e22
-\u0e40\u0e02\u0e49\u0e32
-\u0e40\u0e02\u0e32
-\u0e2d\u0e35\u0e01
-\u0e2d\u0e32\u0e08
-\u0e2d\u0e30\u0e44\u0e23
-\u0e2d\u0e2d\u0e01
-\u0e2d\u0e22\u0e48\u0e32\u0e07
-\u0e2d\u0e22\u0e39\u0e48
-\u0e2d\u0e22\u0e32\u0e01
-\u0e2b\u0e32\u0e01
-\u0e2b\u0e25\u0e32\u0e22
-\u0e2b\u0e25\u0e31\u0e07\u0e08\u0e32\u0e01
-\u0e2b\u0e25\u0e31\u0e07
-\u0e2b\u0e23\u0e37\u0e2d
-\u0e2b\u0e19\u0e36\u0e48\u0e07
-\u0e2a\u0e48\u0e27\u0e19
-\u0e2a\u0e48\u0e07
-\u0e2a\u0e38\u0e14
-\u0e2a\u0e4d\u0e32\u0e2b\u0e23\u0e31\u0e1a
-\u0e27\u0e48\u0e32
-\u0e27\u0e31\u0e19
-\u0e25\u0e07
-\u0e23\u0e48\u0e27\u0e21
-\u0e23\u0e32\u0e22
-\u0e23\u0e31\u0e1a
-\u0e23\u0e30\u0e2b\u0e27\u0e48\u0e32\u0e07
-\u0e23\u0e27\u0e21
-\u0e22\u0e31\u0e07
-\u0e21\u0e35
-\u0e21\u0e32\u0e01
-\u0e21\u0e32
-\u0e1e\u0e23\u0e49\u0e2d\u0e21
-\u0e1e\u0e1a
-\u0e1c\u0e48\u0e32\u0e19
-\u0e1c\u0e25
-\u0e1a\u0e32\u0e07
-\u0e19\u0e48\u0e32
-\u0e19\u0e35\u0e49
-\u0e19\u0e4d\u0e32
-\u0e19\u0e31\u0e49\u0e19
-\u0e19\u0e31\u0e01
-\u0e19\u0e2d\u0e01\u0e08\u0e32\u0e01
-\u0e17\u0e38\u0e01
-\u0e17\u0e35\u0e48\u0e2a\u0e38\u0e14
-\u0e17\u0e35\u0e48
-\u0e17\u0e4d\u0e32\u0e43\u0e2b\u0e49
-\u0e17\u0e4d\u0e32
-\u0e17\u0e32\u0e07
-\u0e17\u0e31\u0e49\u0e07\u0e19\u0e35\u0e49
-\u0e17\u0e31\u0e49\u0e07
-\u0e16\u0e49\u0e32
-\u0e16\u0e39\u0e01
-\u0e16\u0e36\u0e07
-\u0e15\u0e49\u0e2d\u0e07
-\u0e15\u0e48\u0e32\u0e07\u0e46
-\u0e15\u0e48\u0e32\u0e07
-\u0e15\u0e48\u0e2d
-\u0e15\u0e32\u0e21
-\u0e15\u0e31\u0e49\u0e07\u0e41\u0e15\u0e48
-\u0e15\u0e31\u0e49\u0e07
-\u0e14\u0e49\u0e32\u0e19
-\u0e14\u0e49\u0e27\u0e22
-\u0e14\u0e31\u0e07
-\u0e0b\u0e36\u0e48\u0e07
-\u0e0a\u0e48\u0e27\u0e07
-\u0e08\u0e36\u0e07
-\u0e08\u0e32\u0e01
-\u0e08\u0e31\u0e14
-\u0e08\u0e30
-\u0e04\u0e37\u0e2d
-\u0e04\u0e27\u0e32\u0e21
-\u0e04\u0e23\u0e31\u0e49\u0e07
-\u0e04\u0e07
-\u0e02\u0e36\u0e49\u0e19
-\u0e02\u0e2d\u0e07
-\u0e02\u0e2d
-\u0e02\u0e13\u0e30
-\u0e01\u0e48\u0e2d\u0e19
-\u0e01\u0e47
-\u0e01\u0e32\u0e23
-\u0e01\u0e31\u0e1a
-\u0e01\u0e31\u0e19
-\u0e01\u0e27\u0e48\u0e32
-\u0e01\u0e25\u0e48\u0e32\u0e27

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stopwords_tr.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_tr.txt b/solr/example/example-DIH/solr/rss/conf/lang/stopwords_tr.txt
deleted file mode 100644
index 84d9408..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_tr.txt
+++ /dev/null
@@ -1,212 +0,0 @@
-# Turkish stopwords from LUCENE-559
-# merged with the list from "Information Retrieval on Turkish Texts"
-#   (http://www.users.muohio.edu/canf/papers/JASIST2008offPrint.pdf)
-acaba
-altm\u0131\u015f
-alt\u0131
-ama
-ancak
-arada
-asl\u0131nda
-ayr\u0131ca
-bana
-baz\u0131
-belki
-ben
-benden
-beni
-benim
-beri
-be\u015f
-bile
-bin
-bir
-birçok
-biri
-birkaç
-birkez
-bir\u015fey
-bir\u015feyi
-biz
-bize
-bizden
-bizi
-bizim
-böyle
-böylece
-bu
-buna
-bunda
-bundan
-bunlar
-bunlar\u0131
-bunlar\u0131n
-bunu
-bunun
-burada
-çok
-çünkü
-da
-daha
-dahi
-de
-defa
-de\u011fil
-di\u011fer
-diye
-doksan
-dokuz
-dolay\u0131
-dolay\u0131s\u0131yla
-dört
-edecek
-eden
-ederek
-edilecek
-ediliyor
-edilmesi
-ediyor
-e\u011fer
-elli
-en
-etmesi
-etti
-etti\u011fi
-etti\u011fini
-gibi
-göre
-halen
-hangi
-hatta
-hem
-henüz
-hep
-hepsi
-her
-herhangi
-herkesin
-hiç
-hiçbir
-için
-iki
-ile
-ilgili
-ise
-i\u015fte
-itibaren
-itibariyle
-kadar
-kar\u015f\u0131n
-katrilyon
-kendi
-kendilerine
-kendini
-kendisi
-kendisine
-kendisini
-kez
-ki
-kim
-kimden
-kime
-kimi
-kimse
-k\u0131rk
-milyar
-milyon
-mu
-mü
-m\u0131
-nas\u0131l
-ne
-neden
-nedenle
-nerde
-nerede
-nereye
-niye
-niçin
-o
-olan
-olarak
-oldu
-oldu\u011fu
-oldu\u011funu
-olduklar\u0131n\u0131
-olmad\u0131
-olmad\u0131\u011f\u0131
-olmak
-olmas\u0131
-olmayan
-olmaz
-olsa
-olsun
-olup
-olur
-olursa
-oluyor
-on
-ona
-ondan
-onlar
-onlardan
-onlar\u0131
-onlar\u0131n
-onu
-onun
-otuz
-oysa
-öyle
-pek
-ra\u011fmen
-sadece
-sanki
-sekiz
-seksen
-sen
-senden
-seni
-senin
-siz
-sizden
-sizi
-sizin
-\u015fey
-\u015feyden
-\u015feyi
-\u015feyler
-\u015föyle
-\u015fu
-\u015funa
-\u015funda
-\u015fundan
-\u015funlar\u0131
-\u015funu
-taraf\u0131ndan
-trilyon
-tüm
-üç
-üzere
-var
-vard\u0131
-ve
-veya
-ya
-yani
-yapacak
-yap\u0131lan
-yap\u0131lmas\u0131
-yap\u0131yor
-yapmak
-yapt\u0131
-yapt\u0131\u011f\u0131
-yapt\u0131\u011f\u0131n\u0131
-yapt\u0131klar\u0131
-yedi
-yerine
-yetmi\u015f
-yine
-yirmi
-yoksa
-yüz
-zaten

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/userdict_ja.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/userdict_ja.txt b/solr/example/example-DIH/solr/rss/conf/lang/userdict_ja.txt
deleted file mode 100644
index 6f0368e..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/userdict_ja.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-#
-# This is a sample user dictionary for Kuromoji (JapaneseTokenizer)
-#
-# Add entries to this file in order to override the statistical model in terms
-# of segmentation, readings and part-of-speech tags.  Notice that entries do
-# not have weights since they are always used when found.  This is by-design
-# in order to maximize ease-of-use.
-#
-# Entries are defined using the following CSV format:
-#  <text>,<token 1> ... <token n>,<reading 1> ... <reading n>,<part-of-speech tag>
-#
-# Notice that a single half-width space separates tokens and readings, and
-# that the number tokens and readings must match exactly.
-#
-# Also notice that multiple entries with the same <text> is undefined.
-#
-# Whitespace only lines are ignored.  Comments are not allowed on entry lines.
-#
-
-# Custom segmentation for kanji compounds
-\u65e5\u672c\u7d4c\u6e08\u65b0\u805e,\u65e5\u672c \u7d4c\u6e08 \u65b0\u805e,\u30cb\u30db\u30f3 \u30b1\u30a4\u30b6\u30a4 \u30b7\u30f3\u30d6\u30f3,\u30ab\u30b9\u30bf\u30e0\u540d\u8a5e
-\u95a2\u897f\u56fd\u969b\u7a7a\u6e2f,\u95a2\u897f \u56fd\u969b \u7a7a\u6e2f,\u30ab\u30f3\u30b5\u30a4 \u30b3\u30af\u30b5\u30a4 \u30af\u30a6\u30b3\u30a6,\u30ab\u30b9\u30bf\u30e0\u540d\u8a5e
-
-# Custom segmentation for compound katakana
-\u30c8\u30fc\u30c8\u30d0\u30c3\u30b0,\u30c8\u30fc\u30c8 \u30d0\u30c3\u30b0,\u30c8\u30fc\u30c8 \u30d0\u30c3\u30b0,\u304b\u305a\u30ab\u30ca\u540d\u8a5e
-\u30b7\u30e7\u30eb\u30c0\u30fc\u30d0\u30c3\u30b0,\u30b7\u30e7\u30eb\u30c0\u30fc \u30d0\u30c3\u30b0,\u30b7\u30e7\u30eb\u30c0\u30fc \u30d0\u30c3\u30b0,\u304b\u305a\u30ab\u30ca\u540d\u8a5e
-
-# Custom reading for former sumo wrestler
-\u671d\u9752\u9f8d,\u671d\u9752\u9f8d,\u30a2\u30b5\u30b7\u30e7\u30a6\u30ea\u30e5\u30a6,\u30ab\u30b9\u30bf\u30e0\u4eba\u540d


[30/52] [abbrv] lucene-solr:jira/solr-9959: LUCENE-7753: Make fields static when possible.

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
index dccc2c6..b0721a2 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
@@ -81,8 +81,8 @@ public class OverseerTest extends SolrTestCaseJ4 {
   private List<ZkStateReader> readers = new ArrayList<>();
   private List<HttpShardHandlerFactory> httpShardHandlerFactorys = new ArrayList<>();
   private List<UpdateShardHandler> updateShardHandlers = new ArrayList<>();
-  
-  final private String collection = SolrTestCaseJ4.DEFAULT_TEST_COLLECTION_NAME;
+
+  private static final String COLLECTION = SolrTestCaseJ4.DEFAULT_TEST_COLLECTION_NAME;
   
   public static class MockZKController{
     
@@ -271,17 +271,17 @@ public class OverseerTest extends SolrTestCaseJ4 {
       final int numShards=6;
       
       for (int i = 0; i < numShards; i++) {
-        assertNotNull("shard got no id?", zkController.publishState(collection, "core" + (i+1), "node" + (i+1), Replica.State.ACTIVE, 3));
+        assertNotNull("shard got no id?", zkController.publishState(COLLECTION, "core" + (i+1), "node" + (i+1), Replica.State.ACTIVE, 3));
       }
-      final Map<String,Replica> rmap = reader.getClusterState().getSlice(collection, "shard1").getReplicasMap();
+      final Map<String,Replica> rmap = reader.getClusterState().getSlice(COLLECTION, "shard1").getReplicasMap();
       assertEquals(rmap.toString(), 2, rmap.size());
-      assertEquals(rmap.toString(), 2, reader.getClusterState().getSlice(collection, "shard2").getReplicasMap().size());
-      assertEquals(rmap.toString(), 2, reader.getClusterState().getSlice(collection, "shard3").getReplicasMap().size());
+      assertEquals(rmap.toString(), 2, reader.getClusterState().getSlice(COLLECTION, "shard2").getReplicasMap().size());
+      assertEquals(rmap.toString(), 2, reader.getClusterState().getSlice(COLLECTION, "shard3").getReplicasMap().size());
       
       //make sure leaders are in cloud state
-      assertNotNull(reader.getLeaderUrl(collection, "shard1", 15000));
-      assertNotNull(reader.getLeaderUrl(collection, "shard2", 15000));
-      assertNotNull(reader.getLeaderUrl(collection, "shard3", 15000));
+      assertNotNull(reader.getLeaderUrl(COLLECTION, "shard1", 15000));
+      assertNotNull(reader.getLeaderUrl(COLLECTION, "shard2", 15000));
+      assertNotNull(reader.getLeaderUrl(COLLECTION, "shard3", 15000));
       
     } finally {
       close(zkClient);
@@ -321,17 +321,17 @@ public class OverseerTest extends SolrTestCaseJ4 {
       final int numShards=3;
       
       for (int i = 0; i < numShards; i++) {
-        assertNotNull("shard got no id?", zkController.publishState(collection, "core" + (i+1), "node" + (i+1), Replica.State.ACTIVE, 3));
+        assertNotNull("shard got no id?", zkController.publishState(COLLECTION, "core" + (i+1), "node" + (i+1), Replica.State.ACTIVE, 3));
       }
 
-      assertEquals(1, reader.getClusterState().getSlice(collection, "shard1").getReplicasMap().size());
-      assertEquals(1, reader.getClusterState().getSlice(collection, "shard2").getReplicasMap().size());
-      assertEquals(1, reader.getClusterState().getSlice(collection, "shard3").getReplicasMap().size());
+      assertEquals(1, reader.getClusterState().getSlice(COLLECTION, "shard1").getReplicasMap().size());
+      assertEquals(1, reader.getClusterState().getSlice(COLLECTION, "shard2").getReplicasMap().size());
+      assertEquals(1, reader.getClusterState().getSlice(COLLECTION, "shard3").getReplicasMap().size());
       
       //make sure leaders are in cloud state
-      assertNotNull(reader.getLeaderUrl(collection, "shard1", 15000));
-      assertNotNull(reader.getLeaderUrl(collection, "shard2", 15000));
-      assertNotNull(reader.getLeaderUrl(collection, "shard3", 15000));
+      assertNotNull(reader.getLeaderUrl(COLLECTION, "shard1", 15000));
+      assertNotNull(reader.getLeaderUrl(COLLECTION, "shard2", 15000));
+      assertNotNull(reader.getLeaderUrl(COLLECTION, "shard3", 15000));
       
       // publish a bad queue item
       String emptyCollectionName = "";
@@ -408,7 +408,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
           final String coreName = "core" + slot;
 
           try {
-            ids[slot] = controllers[slot % nodeCount].publishState(collection, coreName, "node" + slot, Replica.State.ACTIVE, sliceCount);
+            ids[slot] = controllers[slot % nodeCount].publishState(COLLECTION, coreName, "node" + slot, Replica.State.ACTIVE, sliceCount);
           } catch (Throwable e) {
             e.printStackTrace();
             fail("register threw exception:" + e.getClass());
@@ -429,7 +429,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
       for (int i = 0; i < 40; i++) {
         cloudStateSliceCount = 0;
         ClusterState state = reader.getClusterState();
-        final Map<String,Slice> slices = state.getSlicesMap(collection);
+        final Map<String,Slice> slices = state.getSlicesMap(COLLECTION);
         if (slices != null) {
           for (String name : slices.keySet()) {
             cloudStateSliceCount += slices.get(name).getReplicasMap().size();
@@ -483,7 +483,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
       
       //make sure leaders are in cloud state
       for (int i = 0; i < sliceCount; i++) {
-        assertNotNull(reader.getLeaderUrl(collection, "shard" + (i + 1), 15000));
+        assertNotNull(reader.getLeaderUrl(COLLECTION, "shard" + (i + 1), 15000));
       }
 
     } finally {
@@ -549,23 +549,23 @@ public class OverseerTest extends SolrTestCaseJ4 {
       ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
           ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
           ZkStateReader.NODE_NAME_PROP, "node1",
-          ZkStateReader.COLLECTION_PROP, collection,
+          ZkStateReader.COLLECTION_PROP, COLLECTION,
           ZkStateReader.CORE_NAME_PROP, "core1",
           ZkStateReader.ROLES_PROP, "",
           ZkStateReader.STATE_PROP, Replica.State.RECOVERING.toString());
       
       q.offer(Utils.toJSON(m));
       
-      waitForCollections(reader, collection);
+      waitForCollections(reader, COLLECTION);
 
       assertSame(reader.getClusterState().toString(), Replica.State.RECOVERING,
-          reader.getClusterState().getSlice(collection, "shard1").getReplica("core_node1").getState());
+          reader.getClusterState().getSlice(COLLECTION, "shard1").getReplica("core_node1").getState());
 
       //publish node state (active)
       m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
           ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
           ZkStateReader.NODE_NAME_PROP, "node1",
-          ZkStateReader.COLLECTION_PROP, collection,
+          ZkStateReader.COLLECTION_PROP, COLLECTION,
           ZkStateReader.CORE_NAME_PROP, "core1",
           ZkStateReader.ROLES_PROP, "",
           ZkStateReader.STATE_PROP, Replica.State.ACTIVE.toString());
@@ -634,25 +634,25 @@ public class OverseerTest extends SolrTestCaseJ4 {
       overseerClient = electNewOverseer(server.getZkAddress());
       
       Thread.sleep(1000);
-      mockController.publishState(collection, core, core_node,
+      mockController.publishState(COLLECTION, core, core_node,
           Replica.State.RECOVERING, numShards);
       
-      waitForCollections(reader, collection);
-      verifyReplicaStatus(reader, collection, "shard1", "core_node1", Replica.State.RECOVERING);
+      waitForCollections(reader, COLLECTION);
+      verifyReplicaStatus(reader, COLLECTION, "shard1", "core_node1", Replica.State.RECOVERING);
       
       int version = getClusterStateVersion(zkClient);
       
-      mockController.publishState(collection, core, core_node, Replica.State.ACTIVE,
+      mockController.publishState(COLLECTION, core, core_node, Replica.State.ACTIVE,
           numShards);
       
       while (version == getClusterStateVersion(zkClient));
 
-      verifyReplicaStatus(reader, collection, "shard1", "core_node1", Replica.State.ACTIVE);
+      verifyReplicaStatus(reader, COLLECTION, "shard1", "core_node1", Replica.State.ACTIVE);
       version = getClusterStateVersion(zkClient);
       overseerClient.close();
       Thread.sleep(1000); // wait for overseer to get killed
       
-      mockController.publishState(collection, core, core_node,
+      mockController.publishState(COLLECTION, core, core_node,
           Replica.State.RECOVERING, numShards);
       version = getClusterStateVersion(zkClient);
       
@@ -660,20 +660,20 @@ public class OverseerTest extends SolrTestCaseJ4 {
       
       while (version == getClusterStateVersion(zkClient));
 
-      verifyReplicaStatus(reader, collection, "shard1", "core_node1", Replica.State.RECOVERING);
+      verifyReplicaStatus(reader, COLLECTION, "shard1", "core_node1", Replica.State.RECOVERING);
       
       assertEquals("Live nodes count does not match", 1, reader
           .getClusterState().getLiveNodes().size());
       assertEquals(shard+" replica count does not match", 1, reader.getClusterState()
-          .getSlice(collection, shard).getReplicasMap().size());
+          .getSlice(COLLECTION, shard).getReplicasMap().size());
       version = getClusterStateVersion(zkClient);
-      mockController.publishState(collection, core, core_node, null, numShards);
+      mockController.publishState(COLLECTION, core, core_node, null, numShards);
       while (version == getClusterStateVersion(zkClient));
       Thread.sleep(500);
-      assertTrue(collection+" should remain after removal of the last core", // as of SOLR-5209 core removal does not cascade to remove the slice and collection
-          reader.getClusterState().hasCollection(collection));
+      assertTrue(COLLECTION +" should remain after removal of the last core", // as of SOLR-5209 core removal does not cascade to remove the slice and collection
+          reader.getClusterState().hasCollection(COLLECTION));
       assertTrue(core_node+" should be gone after publishing the null state",
-          null == reader.getClusterState().getCollection(collection).getReplica(core_node));
+          null == reader.getClusterState().getCollection(COLLECTION).getReplica(core_node));
     } finally {
       close(mockController);
       close(overseerClient);
@@ -723,7 +723,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
       overseerElector.setup(ec);
       overseerElector.joinElection(ec, false);
 
-      mockController.publishState(collection, "core1", "core_node1", Replica.State.ACTIVE, 1);
+      mockController.publishState(COLLECTION, "core1", "core_node1", Replica.State.ACTIVE, 1);
 
       assertNotNull(overseer.getStats());
       assertTrue((overseer.getStats().getSuccessCount(OverseerAction.STATE.toLower())) > 0);
@@ -819,19 +819,19 @@ public class OverseerTest extends SolrTestCaseJ4 {
       for (int i = 0; i < atLeast(4); i++) {
         killCounter.incrementAndGet(); //for each round allow 1 kill
         mockController = new MockZKController(server.getZkAddress(), "node1");
-        mockController.publishState(collection, "core1", "node1", Replica.State.ACTIVE,1);
+        mockController.publishState(COLLECTION, "core1", "node1", Replica.State.ACTIVE,1);
         if(mockController2!=null) {
           mockController2.close();
           mockController2 = null;
         }
-        mockController.publishState(collection, "core1", "node1",Replica.State.RECOVERING,1);
+        mockController.publishState(COLLECTION, "core1", "node1",Replica.State.RECOVERING,1);
         mockController2 = new MockZKController(server.getZkAddress(), "node2");
-        mockController.publishState(collection, "core1", "node1", Replica.State.ACTIVE,1);
-        verifyShardLeader(reader, collection, "shard1", "core1");
-        mockController2.publishState(collection, "core4", "node2", Replica.State.ACTIVE ,1);
+        mockController.publishState(COLLECTION, "core1", "node1", Replica.State.ACTIVE,1);
+        verifyShardLeader(reader, COLLECTION, "shard1", "core1");
+        mockController2.publishState(COLLECTION, "core4", "node2", Replica.State.ACTIVE ,1);
         mockController.close();
         mockController = null;
-        verifyShardLeader(reader, collection, "shard1", "core4");
+        verifyShardLeader(reader, COLLECTION, "shard1", "core4");
       }
     } finally {
       if (killer != null) {
@@ -874,18 +874,18 @@ public class OverseerTest extends SolrTestCaseJ4 {
       
       overseerClient = electNewOverseer(server.getZkAddress());
 
-      mockController.publishState(collection, "core1", "core_node1", Replica.State.RECOVERING, 1);
+      mockController.publishState(COLLECTION, "core1", "core_node1", Replica.State.RECOVERING, 1);
 
       waitForCollections(reader, "collection1");
 
-      verifyReplicaStatus(reader, collection, "shard1", "core_node1", Replica.State.RECOVERING);
+      verifyReplicaStatus(reader, COLLECTION, "shard1", "core_node1", Replica.State.RECOVERING);
 
       mockController.close();
 
       int version = getClusterStateVersion(controllerClient);
       
       mockController = new MockZKController(server.getZkAddress(), "node1");
-      mockController.publishState(collection, "core1", "core_node1", Replica.State.RECOVERING, 1);
+      mockController.publishState(COLLECTION, "core1", "core_node1", Replica.State.RECOVERING, 1);
 
       while (version == reader.getClusterState().getZkClusterStateVersion()) {
         Thread.sleep(100);
@@ -940,11 +940,11 @@ public class OverseerTest extends SolrTestCaseJ4 {
       
       overseerClient = electNewOverseer(server.getZkAddress());
 
-      mockController.publishState(collection, "core1", "node1", Replica.State.RECOVERING, 12);
+      mockController.publishState(COLLECTION, "core1", "node1", Replica.State.RECOVERING, 12);
 
-      waitForCollections(reader, collection);
+      waitForCollections(reader, COLLECTION);
       
-      assertEquals("Slicecount does not match", 12, reader.getClusterState().getSlices(collection).size());
+      assertEquals("Slicecount does not match", 12, reader.getClusterState().getSlices(COLLECTION).size());
       
     } finally {
       close(overseerClient);
@@ -1117,7 +1117,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
           ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
           ZkStateReader.NODE_NAME_PROP, "node1",
           ZkStateReader.SHARD_ID_PROP, "s1",
-          ZkStateReader.COLLECTION_PROP, collection,
+          ZkStateReader.COLLECTION_PROP, COLLECTION,
           ZkStateReader.CORE_NAME_PROP, "core1",
           ZkStateReader.ROLES_PROP, "",
           ZkStateReader.STATE_PROP, Replica.State.RECOVERING.toString());
@@ -1126,7 +1126,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
           ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
           ZkStateReader.NODE_NAME_PROP, "node1",
           ZkStateReader.SHARD_ID_PROP, "s1",
-          ZkStateReader.COLLECTION_PROP, collection,
+          ZkStateReader.COLLECTION_PROP, COLLECTION,
           ZkStateReader.CORE_NAME_PROP, "core2",
           ZkStateReader.ROLES_PROP, "",
           ZkStateReader.STATE_PROP, Replica.State.RECOVERING.toString());
@@ -1140,19 +1140,19 @@ public class OverseerTest extends SolrTestCaseJ4 {
           ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
           ZkStateReader.NODE_NAME_PROP, "node1",
           ZkStateReader.SHARD_ID_PROP, "s1",
-          ZkStateReader.COLLECTION_PROP, collection,
+          ZkStateReader.COLLECTION_PROP, COLLECTION,
           ZkStateReader.CORE_NAME_PROP, "core3",
           ZkStateReader.ROLES_PROP, "",
           ZkStateReader.STATE_PROP, Replica.State.RECOVERING.toString());
       queue.offer(Utils.toJSON(m));
       
       for(int i=0;i<100;i++) {
-        Slice s = reader.getClusterState().getSlice(collection, "s1");
+        Slice s = reader.getClusterState().getSlice(COLLECTION, "s1");
         if(s!=null && s.getReplicasMap().size()==3) break;
         Thread.sleep(100);
       }
-      assertNotNull(reader.getClusterState().getSlice(collection, "s1"));
-      assertEquals(3, reader.getClusterState().getSlice(collection, "s1").getReplicasMap().size());
+      assertNotNull(reader.getClusterState().getSlice(COLLECTION, "s1"));
+      assertEquals(3, reader.getClusterState().getSlice(COLLECTION, "s1").getReplicasMap().size());
     } finally {
       close(overseerClient);
       close(zkClient);
@@ -1340,14 +1340,14 @@ public class OverseerTest extends SolrTestCaseJ4 {
       {
         final Integer maxShardsPerNode = numReplicas * numShards;
         ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, CollectionParams.CollectionAction.CREATE.toLower(),
-            "name", collection,
+            "name", COLLECTION,
             ZkStateReader.NUM_SHARDS_PROP, numShards.toString(),
             ZkStateReader.REPLICATION_FACTOR, "1",
             ZkStateReader.MAX_SHARDS_PER_NODE, maxShardsPerNode.toString()
             );
         q.offer(Utils.toJSON(m));
       }
-      waitForCollections(zkStateReader, collection);
+      waitForCollections(zkStateReader, COLLECTION);
 
       // create nodes with state recovering
       for (int rr = 1; rr <= numReplicas; ++rr) {
@@ -1357,7 +1357,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
               ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
               ZkStateReader.SHARD_ID_PROP, "shard"+ss,
               ZkStateReader.NODE_NAME_PROP, "node"+N,
-              ZkStateReader.COLLECTION_PROP, collection,
+              ZkStateReader.COLLECTION_PROP, COLLECTION,
               ZkStateReader.CORE_NAME_PROP, "core"+N,
               ZkStateReader.ROLES_PROP, "",
               ZkStateReader.STATE_PROP, Replica.State.RECOVERING.toString());
@@ -1369,7 +1369,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
       for (int rr = 1; rr <= numReplicas; ++rr) {
         for (int ss = 1; ss <= numShards; ++ss) {
           final int N = (numReplicas-rr)*numShards + ss;
-          verifyReplicaStatus(zkStateReader, collection, "shard"+ss, "core_node"+N, Replica.State.RECOVERING);
+          verifyReplicaStatus(zkStateReader, COLLECTION, "shard"+ss, "core_node"+N, Replica.State.RECOVERING);
         }
       }
 
@@ -1380,7 +1380,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
           ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
               ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
               ZkStateReader.NODE_NAME_PROP, "node"+N,
-              ZkStateReader.COLLECTION_PROP, collection,
+              ZkStateReader.COLLECTION_PROP, COLLECTION,
               ZkStateReader.CORE_NAME_PROP, "core"+N,
               ZkStateReader.ROLES_PROP, "",
               ZkStateReader.STATE_PROP, Replica.State.ACTIVE.toString());
@@ -1392,7 +1392,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
       for (int rr = 1; rr <= numReplicas; ++rr) {
         for (int ss = 1; ss <= numShards; ++ss) {
           final int N = (numReplicas-rr)*numShards + ss;
-          verifyReplicaStatus(zkStateReader, collection, "shard"+ss, "core_node"+N, Replica.State.ACTIVE);
+          verifyReplicaStatus(zkStateReader, COLLECTION, "shard"+ss, "core_node"+N, Replica.State.ACTIVE);
         }
       }
 
@@ -1401,7 +1401,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
         for (int ss = 1; ss <= numShards; ++ss) {
           final int N = (numReplicas-rr)*numShards + ss;
           ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.DELETECORE.toLower(),
-              ZkStateReader.COLLECTION_PROP, collection,
+              ZkStateReader.COLLECTION_PROP, COLLECTION,
               ZkStateReader.CORE_NODE_NAME_PROP, "core_node"+N);
 
           q.offer(Utils.toJSON(m));
@@ -1409,23 +1409,23 @@ public class OverseerTest extends SolrTestCaseJ4 {
           {
             int iterationsLeft = 100;
             while (iterationsLeft-- > 0) {
-              final Slice slice = zkStateReader.getClusterState().getSlice(collection, "shard"+ss);
+              final Slice slice = zkStateReader.getClusterState().getSlice(COLLECTION, "shard"+ss);
               if (null == slice || null == slice.getReplicasMap().get("core_node"+N)) {
                 break;
               }
-              if (VERBOSE) log.info("still seeing {} shard{} core_node{}, rechecking in 50ms ({} iterations left)", collection, ss, N, iterationsLeft);
+              if (VERBOSE) log.info("still seeing {} shard{} core_node{}, rechecking in 50ms ({} iterations left)", COLLECTION, ss, N, iterationsLeft);
               Thread.sleep(50);
             }
           }
 
-          final DocCollection docCollection = zkStateReader.getClusterState().getCollection(collection);
-          assertTrue("found no "+collection, (null != docCollection));
+          final DocCollection docCollection = zkStateReader.getClusterState().getCollection(COLLECTION);
+          assertTrue("found no "+ COLLECTION, (null != docCollection));
 
           final Slice slice = docCollection.getSlice("shard"+ss);
-          assertTrue("found no "+collection+" shard"+ss+" slice after removal of replica "+rr+" of "+numReplicas, (null != slice));
+          assertTrue("found no "+ COLLECTION +" shard"+ss+" slice after removal of replica "+rr+" of "+numReplicas, (null != slice));
 
           final Collection<Replica> replicas = slice.getReplicas();
-          assertEquals("wrong number of "+collection+" shard"+ss+" replicas left, replicas="+replicas, numReplicas-rr, replicas.size());
+          assertEquals("wrong number of "+ COLLECTION +" shard"+ss+" replicas left, replicas="+replicas, numReplicas-rr, replicas.size());
         }
       }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/solr/core/src/test/org/apache/solr/cloud/SegmentTerminateEarlyTestState.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/SegmentTerminateEarlyTestState.java b/solr/core/src/test/org/apache/solr/cloud/SegmentTerminateEarlyTestState.java
index 3fe12ed..9824e3d 100644
--- a/solr/core/src/test/org/apache/solr/cloud/SegmentTerminateEarlyTestState.java
+++ b/solr/core/src/test/org/apache/solr/cloud/SegmentTerminateEarlyTestState.java
@@ -34,14 +34,14 @@ import org.apache.solr.response.SolrQueryResponse;
 
 class SegmentTerminateEarlyTestState {
 
-  final String keyField = "id";
+  static final String KEY_FIELD = "id";
 
  // for historic reasons, this is referred to as a "timestamp" field, but in actuality is just an int
   // value representing a number of "minutes" between 0-60.
   // aka: I decided not to rename a million things while refactoring this test
-  public static final String timestampField = "timestamp_i_dvo";
-  public static final String oddField = "odd_l1"; // <dynamicField name="*_l1"  type="long"   indexed="true"  stored="true" multiValued="false"/>
-  public static final String quadField = "quad_l1"; // <dynamicField name="*_l1"  type="long"   indexed="true"  stored="true" multiValued="false"/>
+  public static final String TIMESTAMP_FIELD = "timestamp_i_dvo";
+  public static final String ODD_FIELD = "odd_l1"; // <dynamicField name="*_l1"  type="long"   indexed="true"  stored="true" multiValued="false"/>
+  public static final String QUAD_FIELD = "quad_l1"; // <dynamicField name="*_l1"  type="long"   indexed="true"  stored="true" multiValued="false"/>
 
   final Set<Integer> minTimestampDocKeys = new HashSet<>();
   final Set<Integer> maxTimestampDocKeys = new HashSet<>();
@@ -63,7 +63,7 @@ class SegmentTerminateEarlyTestState {
         ++numDocs;
         final Integer docKey = new Integer(numDocs);
         SolrInputDocument doc = new SolrInputDocument();
-        doc.setField(keyField, ""+docKey);
+        doc.setField(KEY_FIELD, ""+docKey);
         final int MM = rand.nextInt(60); // minutes
         if (minTimestampMM == null || MM <= minTimestampMM.intValue()) {
           if (minTimestampMM != null && MM < minTimestampMM.intValue()) {
@@ -79,9 +79,9 @@ class SegmentTerminateEarlyTestState {
           maxTimestampMM = new Integer(MM);
           maxTimestampDocKeys.add(docKey);
         }
-        doc.setField(timestampField, (Integer)MM);
-        doc.setField(oddField, ""+(numDocs % 2));
-        doc.setField(quadField, ""+(numDocs % 4)+1);
+        doc.setField(TIMESTAMP_FIELD, (Integer)MM);
+        doc.setField(ODD_FIELD, ""+(numDocs % 2));
+        doc.setField(QUAD_FIELD, ""+(numDocs % 4)+1);
         cloudSolrClient.add(doc);
       }
       cloudSolrClient.commit();
@@ -95,9 +95,9 @@ class SegmentTerminateEarlyTestState {
     TestMiniSolrCloudCluster.assertFalse(maxTimestampDocKeys.isEmpty());
     TestMiniSolrCloudCluster.assertTrue("numDocs="+numDocs+" is not even", (numDocs%2)==0);
     final Long oddFieldValue = new Long(maxTimestampDocKeys.iterator().next().intValue()%2);
-    final SolrQuery query = new SolrQuery(oddField+":"+oddFieldValue);
-    query.setSort(timestampField, SolrQuery.ORDER.desc);
-    query.setFields(keyField, oddField, timestampField);
+    final SolrQuery query = new SolrQuery(ODD_FIELD +":"+oddFieldValue);
+    query.setSort(TIMESTAMP_FIELD, SolrQuery.ORDER.desc);
+    query.setFields(KEY_FIELD, ODD_FIELD, TIMESTAMP_FIELD);
     query.setRows(1);
     // CommonParams.SEGMENT_TERMINATE_EARLY parameter intentionally absent
     final QueryResponse rsp = cloudSolrClient.query(query);
@@ -106,9 +106,9 @@ class SegmentTerminateEarlyTestState {
     // check correctness of the first result
     if (rsp.getResults().getNumFound() > 0) {
       final SolrDocument solrDocument0 = rsp.getResults().get(0);
-      TestMiniSolrCloudCluster.assertTrue(keyField+" of ("+solrDocument0+") is not in maxTimestampDocKeys("+maxTimestampDocKeys+")",
-          maxTimestampDocKeys.contains(solrDocument0.getFieldValue(keyField)));
-      TestMiniSolrCloudCluster.assertEquals(oddField, oddFieldValue, solrDocument0.getFieldValue(oddField));
+      TestMiniSolrCloudCluster.assertTrue(KEY_FIELD +" of ("+solrDocument0+") is not in maxTimestampDocKeys("+maxTimestampDocKeys+")",
+          maxTimestampDocKeys.contains(solrDocument0.getFieldValue(KEY_FIELD)));
+      TestMiniSolrCloudCluster.assertEquals(ODD_FIELD, oddFieldValue, solrDocument0.getFieldValue(ODD_FIELD));
     }
     // check segmentTerminatedEarly flag
     TestMiniSolrCloudCluster.assertNull("responseHeader.segmentTerminatedEarly present in "+rsp.getResponseHeader(),
@@ -119,9 +119,9 @@ class SegmentTerminateEarlyTestState {
     TestMiniSolrCloudCluster.assertFalse(maxTimestampDocKeys.isEmpty());
     TestMiniSolrCloudCluster.assertTrue("numDocs="+numDocs+" is not even", (numDocs%2)==0);
     final Long oddFieldValue = new Long(maxTimestampDocKeys.iterator().next().intValue()%2);
-    final SolrQuery query = new SolrQuery(oddField+":"+oddFieldValue);
-    query.setSort(timestampField, SolrQuery.ORDER.desc);
-    query.setFields(keyField, oddField, timestampField);
+    final SolrQuery query = new SolrQuery(ODD_FIELD +":"+oddFieldValue);
+    query.setSort(TIMESTAMP_FIELD, SolrQuery.ORDER.desc);
+    query.setFields(KEY_FIELD, ODD_FIELD, TIMESTAMP_FIELD);
     final int rowsWanted = 1;
     query.setRows(rowsWanted);
     final Boolean shardsInfoWanted = (rand.nextBoolean() ? null : new Boolean(rand.nextBoolean()));
@@ -136,9 +136,9 @@ class SegmentTerminateEarlyTestState {
     // check correctness of the first result
     if (rsp.getResults().getNumFound() > 0) {
       final SolrDocument solrDocument0 = rsp.getResults().get(0);
-      TestMiniSolrCloudCluster.assertTrue(keyField+" of ("+solrDocument0+") is not in maxTimestampDocKeys("+maxTimestampDocKeys+")",
-          maxTimestampDocKeys.contains(solrDocument0.getFieldValue(keyField)));
-      TestMiniSolrCloudCluster.assertEquals(oddField, oddFieldValue, rsp.getResults().get(0).getFieldValue(oddField));
+      TestMiniSolrCloudCluster.assertTrue(KEY_FIELD +" of ("+solrDocument0+") is not in maxTimestampDocKeys("+maxTimestampDocKeys+")",
+          maxTimestampDocKeys.contains(solrDocument0.getFieldValue(KEY_FIELD)));
+      TestMiniSolrCloudCluster.assertEquals(ODD_FIELD, oddFieldValue, rsp.getResults().get(0).getFieldValue(ODD_FIELD));
     }
     // check segmentTerminatedEarly flag
     TestMiniSolrCloudCluster.assertNotNull("responseHeader.segmentTerminatedEarly missing in "+rsp.getResponseHeader(),
@@ -167,9 +167,9 @@ class SegmentTerminateEarlyTestState {
     TestMiniSolrCloudCluster.assertFalse(maxTimestampDocKeys.isEmpty());
     TestMiniSolrCloudCluster.assertTrue("numDocs="+numDocs+" is not even", (numDocs%2)==0);
     final Long oddFieldValue = new Long(maxTimestampDocKeys.iterator().next().intValue()%2);
-    final SolrQuery query = new SolrQuery(oddField+":"+oddFieldValue);
-    query.setSort(timestampField, SolrQuery.ORDER.desc);
-    query.setFields(keyField, oddField, timestampField);
+    final SolrQuery query = new SolrQuery(ODD_FIELD +":"+oddFieldValue);
+    query.setSort(TIMESTAMP_FIELD, SolrQuery.ORDER.desc);
+    query.setFields(KEY_FIELD, ODD_FIELD, TIMESTAMP_FIELD);
     query.setRows(1);
     final Boolean shardsInfoWanted = (rand.nextBoolean() ? null : new Boolean(rand.nextBoolean()));
     if (shardsInfoWanted != null) {
@@ -182,9 +182,9 @@ class SegmentTerminateEarlyTestState {
     // check correctness of the first result
     if (rsp.getResults().getNumFound() > 0) {
       final SolrDocument solrDocument0 = rsp.getResults().get(0);
-      TestMiniSolrCloudCluster.assertTrue(keyField+" of ("+solrDocument0+") is not in maxTimestampDocKeys("+maxTimestampDocKeys+")",
-          maxTimestampDocKeys.contains(solrDocument0.getFieldValue(keyField)));
-      TestMiniSolrCloudCluster.assertEquals(oddField, oddFieldValue, rsp.getResults().get(0).getFieldValue(oddField));
+      TestMiniSolrCloudCluster.assertTrue(KEY_FIELD +" of ("+solrDocument0+") is not in maxTimestampDocKeys("+maxTimestampDocKeys+")",
+          maxTimestampDocKeys.contains(solrDocument0.getFieldValue(KEY_FIELD)));
+      TestMiniSolrCloudCluster.assertEquals(ODD_FIELD, oddFieldValue, rsp.getResults().get(0).getFieldValue(ODD_FIELD));
     }
     // check segmentTerminatedEarly flag
     TestMiniSolrCloudCluster.assertNull("responseHeader.segmentTerminatedEarly present in "+rsp.getResponseHeader(),
@@ -212,13 +212,13 @@ class SegmentTerminateEarlyTestState {
     TestMiniSolrCloudCluster.assertFalse(maxTimestampDocKeys.isEmpty());
     TestMiniSolrCloudCluster.assertTrue("numDocs="+numDocs+" is not even", (numDocs%2)==0);
     final Long oddFieldValue = new Long(maxTimestampDocKeys.iterator().next().intValue()%2);
-    final SolrQuery query = new SolrQuery(oddField+":"+oddFieldValue);
-    query.setSort(timestampField, SolrQuery.ORDER.desc);
-    query.setFields(keyField, oddField, timestampField);
+    final SolrQuery query = new SolrQuery(ODD_FIELD +":"+oddFieldValue);
+    query.setSort(TIMESTAMP_FIELD, SolrQuery.ORDER.desc);
+    query.setFields(KEY_FIELD, ODD_FIELD, TIMESTAMP_FIELD);
     query.setRows(1);
     query.set(CommonParams.SEGMENT_TERMINATE_EARLY, true);
     TestMiniSolrCloudCluster.assertTrue("numDocs="+numDocs+" is not quad-able", (numDocs%4)==0);
-    query.add("group.field", quadField);
+    query.add("group.field", QUAD_FIELD);
     query.set("group", true);
     final QueryResponse rsp = cloudSolrClient.query(query);
     // check correctness of the results count
@@ -226,9 +226,9 @@ class SegmentTerminateEarlyTestState {
     // check correctness of the first result
     if (rsp.getGroupResponse().getValues().get(0).getMatches() > 0) {
       final SolrDocument solrDocument = rsp.getGroupResponse().getValues().get(0).getValues().get(0).getResult().get(0);
-      TestMiniSolrCloudCluster.assertTrue(keyField+" of ("+solrDocument+") is not in maxTimestampDocKeys("+maxTimestampDocKeys+")",
-          maxTimestampDocKeys.contains(solrDocument.getFieldValue(keyField)));
-      TestMiniSolrCloudCluster.assertEquals(oddField, oddFieldValue, solrDocument.getFieldValue(oddField));
+      TestMiniSolrCloudCluster.assertTrue(KEY_FIELD +" of ("+solrDocument+") is not in maxTimestampDocKeys("+maxTimestampDocKeys+")",
+          maxTimestampDocKeys.contains(solrDocument.getFieldValue(KEY_FIELD)));
+      TestMiniSolrCloudCluster.assertEquals(ODD_FIELD, oddFieldValue, solrDocument.getFieldValue(ODD_FIELD));
     }
     // check segmentTerminatedEarly flag
     // at present segmentTerminateEarly cannot be used with grouped queries
@@ -240,9 +240,9 @@ class SegmentTerminateEarlyTestState {
     TestMiniSolrCloudCluster.assertFalse(minTimestampDocKeys.isEmpty());
     TestMiniSolrCloudCluster.assertTrue("numDocs="+numDocs+" is not even", (numDocs%2)==0);
     final Long oddFieldValue = new Long(minTimestampDocKeys.iterator().next().intValue()%2);
-    final SolrQuery query = new SolrQuery(oddField+":"+oddFieldValue);
-    query.setSort(timestampField, SolrQuery.ORDER.asc); // a sort order that is _not_ compatible with the merge sort order
-    query.setFields(keyField, oddField, timestampField);
+    final SolrQuery query = new SolrQuery(ODD_FIELD +":"+oddFieldValue);
+    query.setSort(TIMESTAMP_FIELD, SolrQuery.ORDER.asc); // a sort order that is _not_ compatible with the merge sort order
+    query.setFields(KEY_FIELD, ODD_FIELD, TIMESTAMP_FIELD);
     query.setRows(1);
     query.set(CommonParams.SEGMENT_TERMINATE_EARLY, true);
     final QueryResponse rsp = cloudSolrClient.query(query);
@@ -251,9 +251,9 @@ class SegmentTerminateEarlyTestState {
     // check correctness of the first result
     if (rsp.getResults().getNumFound() > 0) {
       final SolrDocument solrDocument0 = rsp.getResults().get(0);
-      TestMiniSolrCloudCluster.assertTrue(keyField+" of ("+solrDocument0+") is not in minTimestampDocKeys("+minTimestampDocKeys+")",
-          minTimestampDocKeys.contains(solrDocument0.getFieldValue(keyField)));
-      TestMiniSolrCloudCluster.assertEquals(oddField, oddFieldValue, solrDocument0.getFieldValue(oddField));
+      TestMiniSolrCloudCluster.assertTrue(KEY_FIELD +" of ("+solrDocument0+") is not in minTimestampDocKeys("+minTimestampDocKeys+")",
+          minTimestampDocKeys.contains(solrDocument0.getFieldValue(KEY_FIELD)));
+      TestMiniSolrCloudCluster.assertEquals(ODD_FIELD, oddFieldValue, solrDocument0.getFieldValue(ODD_FIELD));
     }
     // check segmentTerminatedEarly flag
     TestMiniSolrCloudCluster.assertNotNull("responseHeader.segmentTerminatedEarly missing in "+rsp.getResponseHeader(),

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/solr/core/src/test/org/apache/solr/cloud/SolrXmlInZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/SolrXmlInZkTest.java b/solr/core/src/test/org/apache/solr/cloud/SolrXmlInZkTest.java
index 2642814..5e9e180 100644
--- a/solr/core/src/test/org/apache/solr/cloud/SolrXmlInZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/SolrXmlInZkTest.java
@@ -163,7 +163,7 @@ public class SolrXmlInZkTest extends SolrTestCaseJ4 {
 
   // Just a random port, I'm not going to use it but just check that the Solr instance constructed from the XML
   // file in ZK overrides the default port.
-  private final String XML_FOR_ZK =
+  private static final String XML_FOR_ZK =
       "<solr>" +
           "  <solrcloud>" +
           "    <str name=\"host\">127.0.0.1</str>" +

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/solr/core/src/test/org/apache/solr/cloud/TestSegmentSorting.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestSegmentSorting.java b/solr/core/src/test/org/apache/solr/cloud/TestSegmentSorting.java
index 5e6283a..fa2449d 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestSegmentSorting.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestSegmentSorting.java
@@ -123,7 +123,7 @@ public class TestSegmentSorting extends SolrCloudTestCase {
   public void testAtomicUpdateOfSegmentSortField() throws Exception {
 
     final CloudSolrClient cloudSolrClient = cluster.getSolrClient();
-    final String updateField = SegmentTerminateEarlyTestState.timestampField;
+    final String updateField = SegmentTerminateEarlyTestState.TIMESTAMP_FIELD;
 
     // sanity check that updateField is in fact a DocValues only field, meaning it
    // would normally be eligible for in-place updates -- if it weren't also used for merge sorting

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/solr/core/src/test/org/apache/solr/core/OpenCloseCoreStressTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/core/OpenCloseCoreStressTest.java b/solr/core/src/test/org/apache/solr/core/OpenCloseCoreStressTest.java
index 1ebb232..b4ec29f 100644
--- a/solr/core/src/test/org/apache/solr/core/OpenCloseCoreStressTest.java
+++ b/solr/core/src/test/org/apache/solr/core/OpenCloseCoreStressTest.java
@@ -66,7 +66,7 @@ public class OpenCloseCoreStressTest extends SolrTestCaseJ4 {
   final int indexingThreads = TEST_NIGHTLY ? 9 : 5;
   final int queryThreads = TEST_NIGHTLY ? 9 : 5;
 
-  final int resetInterval = 30 * 60; // minutes to report then delete everything
+  static final int RESET_INTERVAL = 30 * 60; // minutes to report then delete everything
   long cumulativeDocs = 0;
 
   String url;
@@ -165,7 +165,7 @@ public class OpenCloseCoreStressTest extends SolrTestCaseJ4 {
       int secondsRemaining = secondsToRun;
       do {
 
-        int cycleSeconds = Math.min(resetInterval, secondsRemaining);
+        int cycleSeconds = Math.min(RESET_INTERVAL, secondsRemaining);
         log.info(String.format(Locale.ROOT, "\n\n\n\n\nStarting a %,d second cycle, seconds left: %,d. Seconds run so far: %,d.",
             cycleSeconds, secondsRemaining, secondsRun));
 
@@ -177,7 +177,7 @@ public class OpenCloseCoreStressTest extends SolrTestCaseJ4 {
 
         queries.waitOnThreads();
 
-        secondsRemaining = Math.max(secondsRemaining - resetInterval, 0);
+        secondsRemaining = Math.max(secondsRemaining - RESET_INTERVAL, 0);
 
         checkResults(queryingClients.get(0), queries, idxer);
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/solr/core/src/test/org/apache/solr/schema/SpatialRPTFieldTypeTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/schema/SpatialRPTFieldTypeTest.java b/solr/core/src/test/org/apache/solr/schema/SpatialRPTFieldTypeTest.java
index a2afa2d..0ff9e3a 100644
--- a/solr/core/src/test/org/apache/solr/schema/SpatialRPTFieldTypeTest.java
+++ b/solr/core/src/test/org/apache/solr/schema/SpatialRPTFieldTypeTest.java
@@ -62,12 +62,12 @@ public class SpatialRPTFieldTypeTest extends AbstractBadConfigTestBase {
     System.clearProperty("managed.schema.mutable");
     System.clearProperty("enable.update.log");
   }
-  
-  final String INDEXED_COORDINATES = "25,82";
-  final String QUERY_COORDINATES = "24,81";
-  final String DISTANCE_DEGREES = "1.3520328";
-  final String DISTANCE_KILOMETERS = "150.33939";
-  final String DISTANCE_MILES = "93.416565";
+
+  static final String INDEXED_COORDINATES = "25,82";
+  static final String QUERY_COORDINATES = "24,81";
+  static final String DISTANCE_DEGREES = "1.3520328";
+  static final String DISTANCE_KILOMETERS = "150.33939";
+  static final String DISTANCE_MILES = "93.416565";
   
   public void testDistanceUnitsDegrees() throws Exception {
     setupRPTField("degrees", "true");

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/solr/core/src/test/org/apache/solr/search/TestRTGBase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestRTGBase.java b/solr/core/src/test/org/apache/solr/search/TestRTGBase.java
index bb1b08a..b2964d8 100644
--- a/solr/core/src/test/org/apache/solr/search/TestRTGBase.java
+++ b/solr/core/src/test/org/apache/solr/search/TestRTGBase.java
@@ -47,7 +47,7 @@ public class TestRTGBase extends SolrTestCaseJ4 {
   protected long snapshotCount;
   protected long committedModelClock;
   protected volatile int lastId;
-  protected final String field = "val_l";
+  protected static final String FIELD = "val_l";
   protected Object[] syncArr;
 
   protected Object globalLock = this;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java b/solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java
index 28ecaa3..4ee4cb7 100644
--- a/solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java
+++ b/solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java
@@ -667,7 +667,7 @@ public class TestRealTimeGet extends TestRTGBase {
                 }
 
                 Long version = null;
-                SolrInputDocument sd = sdoc("id", Integer.toString(id), field, Long.toString(nextVal));
+                SolrInputDocument sd = sdoc("id", Integer.toString(id), FIELD, Long.toString(nextVal));
 
                 if (opt) {
                   if (correct) {
@@ -762,7 +762,7 @@ public class TestRealTimeGet extends TestRTGBase {
                 // This is also correct when filteredOut==true
               } else {
                 assertEquals(1, doclist.size());
-                long foundVal = (Long)(((Map)doclist.get(0)).get(field));
+                long foundVal = (Long)(((Map)doclist.get(0)).get(FIELD));
                 long foundVer = (Long)(((Map)doclist.get(0)).get("_version_"));
                 if (filteredOut || foundVal < Math.abs(info.val)
                     || (foundVer == info.version && foundVal != info.val) ) {    // if the version matches, the val must

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/solr/core/src/test/org/apache/solr/search/TestReloadDeadlock.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestReloadDeadlock.java b/solr/core/src/test/org/apache/solr/search/TestReloadDeadlock.java
index 383070c..32c91db 100644
--- a/solr/core/src/test/org/apache/solr/search/TestReloadDeadlock.java
+++ b/solr/core/src/test/org/apache/solr/search/TestReloadDeadlock.java
@@ -197,7 +197,7 @@ public class TestReloadDeadlock extends TestRTGBase {
   private void addDoc(int id, long nextVal, long version) throws Exception {
     ifVerbose("adding id", id, "val=", nextVal, "version", version);
 
-    Long returnedVersion = addAndGetVersion(sdoc("id", Integer.toString(id), field, Long.toString(nextVal),
+    Long returnedVersion = addAndGetVersion(sdoc("id", Integer.toString(id), FIELD, Long.toString(nextVal),
         "_version_", Long.toString(version)), params(DISTRIB_UPDATE_PARAM, FROM_LEADER));
     if (returnedVersion != null) {
       assertEquals(version, returnedVersion.longValue());

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/solr/core/src/test/org/apache/solr/search/TestStressLucene.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestStressLucene.java b/solr/core/src/test/org/apache/solr/search/TestStressLucene.java
index 779be43..af2ef1d 100644
--- a/solr/core/src/test/org/apache/solr/search/TestStressLucene.java
+++ b/solr/core/src/test/org/apache/solr/search/TestStressLucene.java
@@ -226,7 +226,7 @@ public class TestStressLucene extends TestRTGBase {
                   if (tombstones) {
                     Document d = new Document();
                     d.add(new Field("id","-"+Integer.toString(id), idFt));
-                    d.add(new Field(field, Long.toString(nextVal), ft2));
+                    d.add(new Field(FIELD, Long.toString(nextVal), ft2));
                     verbose("adding tombstone for id",id,"val=",nextVal);
                     writer.updateDocument(new Term("id", "-"+Integer.toString(id)), d);
                   }
@@ -243,7 +243,7 @@ public class TestStressLucene extends TestRTGBase {
                   if (tombstones) {
                     Document d = new Document();
                     d.add(new Field("id","-"+Integer.toString(id), idFt));
-                    d.add(new Field(field, Long.toString(nextVal), ft2));
+                    d.add(new Field(FIELD, Long.toString(nextVal), ft2));
                     verbose("adding tombstone for id",id,"val=",nextVal);
                     writer.updateDocument(new Term("id", "-"+Integer.toString(id)), d);
                   }
@@ -258,7 +258,7 @@ public class TestStressLucene extends TestRTGBase {
                   // assertU(adoc("id",Integer.toString(id), field, Long.toString(nextVal)));
                   Document d = new Document();
                   d.add(new Field("id",Integer.toString(id), idFt));
-                  d.add(new Field(field, Long.toString(nextVal), ft2));
+                  d.add(new Field(FIELD, Long.toString(nextVal), ft2));
                   verbose("adding id",id,"val=",nextVal);
                   writer.updateDocument(new Term("id", Integer.toString(id)), d);
                   if (tombstones) {
@@ -337,7 +337,7 @@ public class TestStressLucene extends TestRTGBase {
                 }
                 assertTrue(docid >= 0);   // we should have found the document, or its tombstone
                 Document doc = r.document(docid);
-                long foundVal = Long.parseLong(doc.get(field));
+                long foundVal = Long.parseLong(doc.get(FIELD));
                 if (foundVal < Math.abs(val)) {
                   verbose("ERROR: id",id,"model_val=",val," foundVal=",foundVal,"reader=",reader);
                 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/solr/core/src/test/org/apache/solr/search/TestStressRecovery.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestStressRecovery.java b/solr/core/src/test/org/apache/solr/search/TestStressRecovery.java
index b6ecc2e..933700d 100644
--- a/solr/core/src/test/org/apache/solr/search/TestStressRecovery.java
+++ b/solr/core/src/test/org/apache/solr/search/TestStressRecovery.java
@@ -228,7 +228,7 @@ public class TestStressRecovery extends TestRTGBase {
               } else {
                 verbose("adding id", id, "val=", nextVal,"version",version);
 
-                Long returnedVersion = addAndGetVersion(sdoc("id", Integer.toString(id), field, Long.toString(nextVal), "_version_",Long.toString(version)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+                Long returnedVersion = addAndGetVersion(sdoc("id", Integer.toString(id), FIELD, Long.toString(nextVal), "_version_",Long.toString(version)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
                 if (returnedVersion != null) {
                   assertEquals(version, returnedVersion.longValue());
                 }
@@ -310,7 +310,7 @@ public class TestStressRecovery extends TestRTGBase {
                 // there's no info we can get back with a delete, so not much we can check without further synchronization
               } else {
                 assertEquals(1, doclist.size());
-                long foundVal = (Long)(((Map)doclist.get(0)).get(field));
+                long foundVal = (Long)(((Map)doclist.get(0)).get(FIELD));
                 long foundVer = (Long)(((Map)doclist.get(0)).get("_version_"));
                 if (foundVer < Math.abs(info.version)
                     || (foundVer == info.version && foundVal != info.val) ) {    // if the version matches, the val must

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/solr/core/src/test/org/apache/solr/search/TestStressReorder.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestStressReorder.java b/solr/core/src/test/org/apache/solr/search/TestStressReorder.java
index bce1d75..e53fed0 100644
--- a/solr/core/src/test/org/apache/solr/search/TestStressReorder.java
+++ b/solr/core/src/test/org/apache/solr/search/TestStressReorder.java
@@ -223,7 +223,7 @@ public class TestStressReorder extends TestRTGBase {
               } else {
                 verbose("adding id", id, "val=", nextVal,"version",version);
 
-                Long returnedVersion = addAndGetVersion(sdoc("id", Integer.toString(id), field, Long.toString(nextVal), "_version_",Long.toString(version)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
+                Long returnedVersion = addAndGetVersion(sdoc("id", Integer.toString(id), FIELD, Long.toString(nextVal), "_version_",Long.toString(version)), params(DISTRIB_UPDATE_PARAM,FROM_LEADER));
                 if (returnedVersion != null) {
                   assertEquals(version, returnedVersion.longValue());
                 }
@@ -301,7 +301,7 @@ public class TestStressReorder extends TestRTGBase {
                 // there's no info we can get back with a delete, so not much we can check without further synchronization
               } else {
                 assertEquals(1, doclist.size());
-                long foundVal = (Long)(((Map)doclist.get(0)).get(field));
+                long foundVal = (Long)(((Map)doclist.get(0)).get(FIELD));
                 long foundVer = (Long)(((Map)doclist.get(0)).get("_version_"));
                 if (foundVer < Math.abs(info.version)
                     || (foundVer == info.version && foundVal != info.val) ) {    // if the version matches, the val must

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/solr/core/src/test/org/apache/solr/search/TestStressUserVersions.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestStressUserVersions.java b/solr/core/src/test/org/apache/solr/search/TestStressUserVersions.java
index 4eea434..5272734 100644
--- a/solr/core/src/test/org/apache/solr/search/TestStressUserVersions.java
+++ b/solr/core/src/test/org/apache/solr/search/TestStressUserVersions.java
@@ -203,7 +203,7 @@ public class TestStressUserVersions extends TestRTGBase {
               } else {
                 verbose("adding id", id, "val=", nextVal,"version",version);
 
-                Long returnedVersion = addAndGetVersion(sdoc("id", Integer.toString(id), field, Long.toString(nextVal), vfield, Long.toString(version)), null);
+                Long returnedVersion = addAndGetVersion(sdoc("id", Integer.toString(id), FIELD, Long.toString(nextVal), vfield, Long.toString(version)), null);
 
                 // only update model if the version is newer
                 synchronized (model) {
@@ -282,7 +282,7 @@ public class TestStressUserVersions extends TestRTGBase {
                 long foundVer = (Long)(((Map)doclist.get(0)).get(vfield));
 
                 if (isLive) {
-                  long foundVal = (Long)(((Map)doclist.get(0)).get(field));
+                  long foundVal = (Long)(((Map)doclist.get(0)).get(FIELD));
                   if (foundVer < Math.abs(info.version)
                       || (foundVer == info.version && foundVal != info.val) ) {    // if the version matches, the val must
                     log.error("ERROR, id=" + id + " found=" + response + " model" + info);
@@ -290,7 +290,7 @@ public class TestStressUserVersions extends TestRTGBase {
                   }
                 } else {
                   // if the doc is deleted (via tombstone), it shouldn't have a value on it.
-                  assertNull( ((Map)doclist.get(0)).get(field) );
+                  assertNull( ((Map)doclist.get(0)).get(FIELD) );
 
                   if (foundVer < Math.abs(info.version)) {
                     log.error("ERROR, id=" + id + " found=" + response + " model" + info);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/solr/core/src/test/org/apache/solr/search/TestStressVersions.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestStressVersions.java b/solr/core/src/test/org/apache/solr/search/TestStressVersions.java
index ed51ae5..15b0c68 100644
--- a/solr/core/src/test/org/apache/solr/search/TestStressVersions.java
+++ b/solr/core/src/test/org/apache/solr/search/TestStressVersions.java
@@ -172,7 +172,7 @@ public class TestStressVersions extends TestRTGBase {
                 verbose("adding id", id, "val=", nextVal);
 
                 // assertU(adoc("id",Integer.toString(id), field, Long.toString(nextVal)));
-                Long version = addAndGetVersion(sdoc("id", Integer.toString(id), field, Long.toString(nextVal)), null);
+                Long version = addAndGetVersion(sdoc("id", Integer.toString(id), FIELD, Long.toString(nextVal)), null);
                 assertTrue(version > 0);
 
                 // only update model if the version is newer
@@ -247,7 +247,7 @@ public class TestStressVersions extends TestRTGBase {
                 // there's no info we can get back with a delete, so not much we can check without further synchronization
               } else {
                 assertEquals(1, doclist.size());
-                long foundVal = (Long)(((Map)doclist.get(0)).get(field));
+                long foundVal = (Long)(((Map)doclist.get(0)).get(FIELD));
                 long foundVer = (Long)(((Map)doclist.get(0)).get("_version_"));
                 if (foundVer < Math.abs(info.version)
                     || (foundVer == info.version && foundVal != info.val) ) {    // if the version matches, the val must

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/solr/solrj/src/test/org/apache/solr/common/util/TestJavaBinCodec.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/common/util/TestJavaBinCodec.java b/solr/solrj/src/test/org/apache/solr/common/util/TestJavaBinCodec.java
index e2229c5..16ace15 100644
--- a/solr/solrj/src/test/org/apache/solr/common/util/TestJavaBinCodec.java
+++ b/solr/solrj/src/test/org/apache/solr/common/util/TestJavaBinCodec.java
@@ -47,10 +47,10 @@ import org.noggit.CharArr;
 public class TestJavaBinCodec extends SolrTestCaseJ4 {
 
   private static final String SOLRJ_JAVABIN_BACKCOMPAT_BIN = "/solrj/javabin_backcompat.bin";
-  private final String BIN_FILE_LOCATION = "./solr/solrj/src/test-files/solrj/javabin_backcompat.bin";
+  private static final String BIN_FILE_LOCATION = "./solr/solrj/src/test-files/solrj/javabin_backcompat.bin";
 
   private static final String SOLRJ_JAVABIN_BACKCOMPAT_BIN_CHILD_DOCS = "/solrj/javabin_backcompat_child_docs.bin";
-  private final String BIN_FILE_LOCATION_CHILD_DOCS = "./solr/solrj/src/test-files/solrj/javabin_backcompat_child_docs.bin";
+  private static final String BIN_FILE_LOCATION_CHILD_DOCS = "./solr/solrj/src/test-files/solrj/javabin_backcompat_child_docs.bin";
 
   public void testStrings() throws Exception {
     for (int i = 0; i < 10000 * RANDOM_MULTIPLIER; i++) {


[32/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-7452: refinement of missing buckets and partial facets through missing buckets

Posted by ab...@apache.org.
SOLR-7452: refinement of missing buckets and partial facets through missing buckets


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/cc623403
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/cc623403
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/cc623403

Branch: refs/heads/jira/solr-9959
Commit: cc623403bd82a176a057e1c9567f37d01f7391c4
Parents: e80643e
Author: yonik <yo...@apache.org>
Authored: Thu Mar 30 12:55:27 2017 -0400
Committer: yonik <yo...@apache.org>
Committed: Fri Mar 31 12:54:30 2017 -0400

----------------------------------------------------------------------
 .../solr/search/facet/FacetFieldMerger.java     | 25 +++++++++++++++++++
 .../solr/search/facet/FacetFieldProcessor.java  | 14 +++++++++++
 .../search/facet/FacetRequestSortedMerger.java  |  8 ++++++
 .../search/facet/TestJsonFacetRefinement.java   | 26 +++++++++++++++++++-
 4 files changed, 72 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/cc623403/solr/core/src/java/org/apache/solr/search/facet/FacetFieldMerger.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/facet/FacetFieldMerger.java b/solr/core/src/java/org/apache/solr/search/facet/FacetFieldMerger.java
index 63e8743..f8f6463 100644
--- a/solr/core/src/java/org/apache/solr/search/facet/FacetFieldMerger.java
+++ b/solr/core/src/java/org/apache/solr/search/facet/FacetFieldMerger.java
@@ -18,8 +18,11 @@
 package org.apache.solr.search.facet;
 
 import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 import org.apache.solr.common.util.SimpleOrderedMap;
@@ -167,7 +170,29 @@ public class FacetFieldMerger extends FacetRequestSortedMerger<FacetField> {
     // basically , only do at the top-level facet?
   }
 
+  @Override
+  Map<String, Object> getRefinementSpecial(Context mcontext, Map<String, Object> refinement, Collection<String> tagsWithPartial) {
+    if (!tagsWithPartial.isEmpty()) {
+      // Since special buckets missing and allBuckets themselves will always be included, we only need to worry about subfacets being partial.
+      if (freq.missing) {
+        refinement = getRefinementSpecial(mcontext, refinement, tagsWithPartial, missingBucket, "missing");
+      }
+      if (freq.allBuckets) {
+        refinement = getRefinementSpecial(mcontext, refinement, tagsWithPartial, allBuckets, "allBuckets");
+      }
+    }
+    return refinement;
+  }
 
+  private Map<String, Object> getRefinementSpecial(Context mcontext, Map<String, Object> refinement, Collection<String> tagsWithPartial, FacetBucket bucket, String label) {
+    // boolean prev = mcontext.setBucketWasMissing(true); // the special buckets should have the same "missing" status as this facet, so no need to set it again
+    Map<String, Object> bucketRefinement = bucket.getRefinement(mcontext, tagsWithPartial);
+    if (bucketRefinement != null) {
+      refinement = refinement == null ? new HashMap<>(2) : refinement;
+      refinement.put(label, bucketRefinement);
+    }
+    return refinement;
+  }
 
   private static class FacetNumBucketsMerger extends FacetMerger {
     long sumBuckets;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/cc623403/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessor.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessor.java b/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessor.java
index a29e78d..d4daf08 100644
--- a/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessor.java
+++ b/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessor.java
@@ -36,6 +36,8 @@ import org.apache.solr.schema.FieldType;
 import org.apache.solr.schema.SchemaField;
 import org.apache.solr.search.DocSet;
 
+import static org.apache.solr.search.facet.FacetContext.SKIP_FACET;
+
 /**
  * Facet processing based on field values. (not range nor by query)
  * @see FacetField
@@ -528,6 +530,9 @@ abstract class FacetFieldProcessor extends FacetProcessor<FacetField> {
   }
 
   protected SimpleOrderedMap<Object> refineFacets() throws IOException {
+    boolean skipThisFacet = (fcontext.flags & SKIP_FACET) != 0;
+
+
     List leaves = asList(fcontext.facetInfo.get("_l"));        // We have not seen this bucket: do full faceting for this bucket, including all sub-facets
     List<List> skip = asList(fcontext.facetInfo.get("_s"));    // We have seen this bucket, so skip stats on it, and skip sub-facets except for the specified sub-facets that should calculate specified buckets.
     List<List> partial = asList(fcontext.facetInfo.get("_p")); // We have not seen this bucket, do full faceting for this bucket, and most sub-facets... but some sub-facets are partial and should only visit specified buckets.
@@ -563,6 +568,15 @@ abstract class FacetFieldProcessor extends FacetProcessor<FacetField> {
       bucketList.add( refineBucket(bucketVal, false, facetInfo ) );
     }
 
+    if (freq.missing) {
+      Map<String,Object> bucketFacetInfo = (Map<String,Object>)fcontext.facetInfo.get("missing");
+
+      if (bucketFacetInfo != null || !skipThisFacet) {
+        SimpleOrderedMap<Object> missingBucket = new SimpleOrderedMap<>();
+        fillBucket(missingBucket, getFieldMissingQuery(fcontext.searcher, freq.field), null, skipThisFacet, bucketFacetInfo);
+        res.add("missing", missingBucket);
+      }
+    }
 
     // If there are just a couple of leaves, and if the domain is large, then
     // going by term is likely the most efficient?

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/cc623403/solr/core/src/java/org/apache/solr/search/facet/FacetRequestSortedMerger.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/facet/FacetRequestSortedMerger.java b/solr/core/src/java/org/apache/solr/search/facet/FacetRequestSortedMerger.java
index e05064c..9ffdea7 100644
--- a/solr/core/src/java/org/apache/solr/search/facet/FacetRequestSortedMerger.java
+++ b/solr/core/src/java/org/apache/solr/search/facet/FacetRequestSortedMerger.java
@@ -240,6 +240,14 @@ abstract class FacetRequestSortedMerger<FacetRequestT extends FacetRequestSorted
       if (skipBuckets != null) refinement.put("_s", skipBuckets);
     }
 
+    refinement = getRefinementSpecial(mcontext, refinement, tagsWithPartial);
+
+    return refinement;
+  }
+
+  // utility method for subclasses to override to finish calculating faceting (special buckets in field facets)... this feels hacky and we
+  // should find a better way.
+  Map<String,Object> getRefinementSpecial(Context mcontext, Map<String,Object> refinement, Collection<String> tagsWithPartial) {
     return refinement;
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/cc623403/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacetRefinement.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacetRefinement.java b/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacetRefinement.java
index b4b0220..52b8be4 100644
--- a/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacetRefinement.java
+++ b/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacetRefinement.java
@@ -219,6 +219,17 @@ public class TestJsonFacetRefinement extends SolrTestCaseHS {
             "}"
     );
 
+    // test partial _p under a missing bucket
+    doTestRefine("{top:{type:terms, field:Afield, refine:true, limit:1, missing:true, facet:{x : {type:terms, field:X, limit:1, refine:true} } } }",
+        "{top: {buckets:[], missing:{count:12, x:{buckets:[{val:x2, count:4},{val:x3, count:2}]} }  } }",
+        "{top: {buckets:[], missing:{count:10, x:{buckets:[{val:x1, count:5},{val:x4, count:3}]} }  } }",
+        "=={top: {" +
+            "missing:{x:{_l:[x1]}}" +
+            "    }  " +
+            "}"
+        , null
+    );
+
   }
 
 
@@ -266,6 +277,17 @@ public class TestJsonFacetRefinement extends SolrTestCaseHS {
     );
     ****/
 
+    // test refining under the special "missing" bucket of a field facet
+    client.testJQ(params(p, "q", "*:*",
+        "json.facet", "{" +
+            "f:{type:terms, field:missing_s, limit:1, overrequest:0, missing:true, refine:true,  facet:{  cat:{type:terms, field:${cat_s}, limit:1, overrequest:0, refine:true   }  }}" +
+            "}"
+        )
+        , "facets=={ count:8" +
+            ", f:{ buckets:[], missing:{count:8, cat:{buckets:[{val:A,count:4}]}  }  }" +  // just like the previous response, just nested under a field facet
+            "}"
+    );
+
 
     client.testJQ(params(p, "q", "*:*",
         "json.facet", "{" +
@@ -317,7 +339,7 @@ public class TestJsonFacetRefinement extends SolrTestCaseHS {
             "}"
     );
 
-    // test missing buckets (field facet within field facet)
+    // test partial buckets (field facet within field facet)
     client.testJQ(params(p, "q", "*:*",
         "json.facet", "{" +
             "ab:{type:terms, field:${cat_s}, limit:1, overrequest:0, refine:true,  facet:{  xy:{type:terms, field:${xy_s}, limit:1, overrequest:0, refine:true   }  }}" +
@@ -345,6 +367,8 @@ public class TestJsonFacetRefinement extends SolrTestCaseHS {
             "}"
     );
 
+
+
   }
 
 


[29/52] [abbrv] lucene-solr:jira/solr-9959: LUCENE-7761: Fixed comment in ReqExclScorer.

Posted by ab...@apache.org.
LUCENE-7761: Fixed comment in ReqExclScorer.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/23c6ea27
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/23c6ea27
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/23c6ea27

Branch: refs/heads/jira/solr-9959
Commit: 23c6ea274ea3e288a916acc160c2ca6c63d3c4dd
Parents: 0445f82
Author: Adrien Grand <jp...@gmail.com>
Authored: Fri Mar 31 16:11:19 2017 +0200
Committer: Adrien Grand <jp...@gmail.com>
Committed: Fri Mar 31 17:28:21 2017 +0200

----------------------------------------------------------------------
 lucene/CHANGES.txt                                               | 3 +++
 lucene/core/src/java/org/apache/lucene/search/ReqExclScorer.java | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/23c6ea27/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index da643ff..c8a8deb 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -93,6 +93,9 @@ Other
 * LUCENE-7743: Never call new String(String).
   (Daniel Jelinski via Adrien Grand)
 
+* LUCENE-7761: Fixed comment in ReqExclScorer.
+  (Pablo Pita Leira via Adrien Grand)
+
 ======================= Lucene 6.5.1 =======================
 
 Bug Fixes

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/23c6ea27/lucene/core/src/java/org/apache/lucene/search/ReqExclScorer.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/ReqExclScorer.java b/lucene/core/src/java/org/apache/lucene/search/ReqExclScorer.java
index 5ce6f5e..50a321b 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ReqExclScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ReqExclScorer.java
@@ -154,7 +154,7 @@ class ReqExclScorer extends Scorer {
         }
       };
     } else {
-      // reqTwoPhaseIterator is MORE costly than exclTwoPhaseIterator, check it first
+      // reqTwoPhaseIterator is MORE costly than exclTwoPhaseIterator, check it last
       return new TwoPhaseIterator(reqApproximation) {
 
         @Override


[42/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-9601: DIH Tika example is now minimal Only keep definitions and files required to show Tika-extraction in DIH

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b02626de/solr/example/example-DIH/solr/tika/conf/solrconfig.xml
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/tika/conf/solrconfig.xml b/solr/example/example-DIH/solr/tika/conf/solrconfig.xml
index ac0c22a..38d5d8b 100644
--- a/solr/example/example-DIH/solr/tika/conf/solrconfig.xml
+++ b/solr/example/example-DIH/solr/tika/conf/solrconfig.xml
@@ -17,1356 +17,42 @@
 -->
 
 <!-- 
-     For more details about configurations options that may appear in
-     this file, see http://wiki.apache.org/solr/SolrConfigXml. 
--->
-<config>
-  <!-- In all configuration below, a prefix of "solr." for class names
-       is an alias that causes solr to search appropriate packages,
-       including org.apache.solr.(search|update|request|core|analysis)
+ This is a DEMO configuration highlighting elements
+ specifically needed to get this example running
+ such as libraries and request handler specifics.
 
-       You may also specify a fully qualified Java classname if you
-       have your own custom plugins.
-    -->
+ It uses defaults or does not define most of production-level settings
+ such as various caches or auto-commit policies.
+
+ See Solr Reference Guide and other examples for
+ more details on a well configured solrconfig.xml
+ https://cwiki.apache.org/confluence/display/solr/The+Well-Configured+Solr+Instance
+-->
 
+<config>
   <!-- Controls what version of Lucene various components of Solr
-       adhere to.  Generally, you want to use the latest version to
-       get all bug fixes and improvements. It is highly recommended
-       that you fully re-index after changing this setting as it can
-       affect both how text is indexed and queried.
+   adhere to.  Generally, you want to use the latest version to
+   get all bug fixes and improvements. It is highly recommended
+   that you fully re-index after changing this setting as it can
+   affect both how text is indexed and queried.
   -->
   <luceneMatchVersion>7.0.0</luceneMatchVersion>
 
-  <!-- <lib/> directives can be used to instruct Solr to load any Jars
-       identified and use them to resolve any "plugins" specified in
-       your solrconfig.xml or schema.xml (ie: Analyzers, Request
-       Handlers, etc...).
-
-       All directories and paths are resolved relative to the
-       instanceDir.
-
-       Please note that <lib/> directives are processed in the order
-       that they appear in your solrconfig.xml file, and are "stacked" 
-       on top of each other when building a ClassLoader - so if you have 
-       plugin jars with dependencies on other jars, the "lower level" 
-       dependency jars should be loaded first.
-
-       If a "./lib" directory exists in your instanceDir, all files
-       found in it are included as if you had used the following
-       syntax...
-       
-              <lib dir="./lib" />
-    -->
-
-  <!-- A 'dir' option by itself adds any files found in the directory 
-       to the classpath, this is useful for including all jars in a
-       directory.
-
-       When a 'regex' is specified in addition to a 'dir', only the
-       files in that directory which completely match the regex
-       (anchored on both ends) will be included.
-
-       If a 'dir' option (with or without a regex) is used and nothing
-       is found that matches, a warning will be logged.
-
-       The examples below can be used to load some solr-contribs along 
-       with their external dependencies.
-    -->
-  <lib dir="${solr.install.dir:../../../..}/contrib/dataimporthandler/lib/" regex=".*\.jar" />
-  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-dataimporthandler-.*\.jar" />
-
-  <lib dir="${solr.install.dir:../../../..}/contrib/extraction/lib" regex=".*\.jar" />
-  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-cell-\d.*\.jar" />
-
-  <lib dir="${solr.install.dir:../../../..}/contrib/langid/lib/" regex=".*\.jar" />
-  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-langid-\d.*\.jar" />
-
-  <lib dir="${solr.install.dir:../../../..}/contrib/velocity/lib" regex=".*\.jar" />
-  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-velocity-\d.*\.jar" />
-
-  <!-- an exact 'path' can be used instead of a 'dir' to specify a 
-       specific jar file.  This will cause a serious error to be logged 
-       if it can't be loaded.
-    -->
-  <!--
-     <lib path="../a-jar-that-does-not-exist.jar" /> 
-  -->
-  
-  <!-- Data Directory
-
-       Used to specify an alternate directory to hold all index data
-       other than the default ./data under the Solr home.  If
-       replication is in use, this should match the replication
-       configuration.
-    -->
-  <dataDir>${solr.data.dir:}</dataDir>
-
-
-  <!-- The DirectoryFactory to use for indexes.
-       
-       solr.StandardDirectoryFactory is filesystem
-       based and tries to pick the best implementation for the current
-       JVM and platform.  solr.NRTCachingDirectoryFactory, the default,
-       wraps solr.StandardDirectoryFactory and caches small files in memory
-       for better NRT performance.
-
-       One can force a particular implementation via solr.MMapDirectoryFactory,
-       solr.NIOFSDirectoryFactory, or solr.SimpleFSDirectoryFactory.
-
-       solr.RAMDirectoryFactory is memory based, not
-       persistent, and doesn't work with replication.
-    -->
-  <directoryFactory name="DirectoryFactory" 
-                    class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}"/>
-
-  <!-- The CodecFactory for defining the format of the inverted index.
-       The default implementation is SchemaCodecFactory, which is the official Lucene
-       index format, but hooks into the schema to provide per-field customization of
-       the postings lists and per-document values in the fieldType element
-       (postingsFormat/docValuesFormat). Note that most of the alternative implementations
-       are experimental, so if you choose to customize the index format, it's a good
-       idea to convert back to the official format e.g. via IndexWriter.addIndexes(IndexReader)
-       before upgrading to a newer version to avoid unnecessary reindexing.
-  -->
-  <codecFactory class="solr.SchemaCodecFactory"/>
-
-  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-       Index Config - These settings control low-level behavior of indexing
-       Most example settings here show the default value, but are commented
-       out, to more easily see where customizations have been made.
-       
-       Note: This replaces <indexDefaults> and <mainIndex> from older versions
-       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
-  <indexConfig>
-    <!-- maxFieldLength was removed in 4.0. To get similar behavior, include a 
-         LimitTokenCountFilterFactory in your fieldType definition. E.g. 
-     <filter class="solr.LimitTokenCountFilterFactory" maxTokenCount="10000"/>
-    -->
-    <!-- Maximum time to wait for a write lock (ms) for an IndexWriter. Default: 1000 -->
-    <!-- <writeLockTimeout>1000</writeLockTimeout>  -->
-
-    <!-- Expert: Enabling compound file will use less files for the index, 
-         using fewer file descriptors on the expense of performance decrease. 
-         Default in Lucene is "true". Default in Solr is "false" (since 3.6) -->
-    <!-- <useCompoundFile>false</useCompoundFile> -->
-
-    <!-- ramBufferSizeMB sets the amount of RAM that may be used by Lucene
-         indexing for buffering added documents and deletions before they are
-         flushed to the Directory.
-         maxBufferedDocs sets a limit on the number of documents buffered
-         before flushing.
-         If both ramBufferSizeMB and maxBufferedDocs is set, then
-         Lucene will flush based on whichever limit is hit first.
-         The default is 100 MB.  -->
-    <!-- <ramBufferSizeMB>100</ramBufferSizeMB> -->
-    <!-- <maxBufferedDocs>1000</maxBufferedDocs> -->
-
-    <!-- Expert: Merge Policy 
-         The Merge Policy in Lucene controls how merging of segments is done.
-         The default since Solr/Lucene 3.3 is TieredMergePolicy.
-         The default since Lucene 2.3 was the LogByteSizeMergePolicy,
-         Even older versions of Lucene used LogDocMergePolicy.
-      -->
-    <!--
-        <mergePolicyFactory class="solr.TieredMergePolicyFactory">
-          <int name="maxMergeAtOnce">10</int>
-          <int name="segmentsPerTier">10</int>
-        </mergePolicyFactory>
-     -->
-
-    <!-- Expert: Merge Scheduler
-         The Merge Scheduler in Lucene controls how merges are
-         performed.  The ConcurrentMergeScheduler (Lucene 2.3 default)
-         can perform merges in the background using separate threads.
-         The SerialMergeScheduler (Lucene 2.2 default) does not.
-     -->
-    <!-- 
-       <mergeScheduler class="org.apache.lucene.index.ConcurrentMergeScheduler"/>
-       -->
-
-    <!-- LockFactory 
-
-         This option specifies which Lucene LockFactory implementation
-         to use.
-      
-         single = SingleInstanceLockFactory - suggested for a
-                  read-only index or when there is no possibility of
-                  another process trying to modify the index.
-         native = NativeFSLockFactory - uses OS native file locking.
-                  Do not use when multiple solr webapps in the same
-                  JVM are attempting to share a single index.
-         simple = SimpleFSLockFactory  - uses a plain file for locking
-
-         Defaults: 'native' is default for Solr3.6 and later, otherwise
-                   'simple' is the default
-
-         More details on the nuances of each LockFactory...
-         http://wiki.apache.org/lucene-java/AvailableLockFactories
-    -->
-    <lockType>${solr.lock.type:native}</lockType>
-
-    <!-- Commit Deletion Policy
-         Custom deletion policies can be specified here. The class must
-         implement org.apache.lucene.index.IndexDeletionPolicy.
-
-         The default Solr IndexDeletionPolicy implementation supports
-         deleting index commit points on number of commits, age of
-         commit point and optimized status.
-         
-         The latest commit point should always be preserved regardless
-         of the criteria.
-    -->
-    <!-- 
-    <deletionPolicy class="solr.SolrDeletionPolicy">
-    -->
-      <!-- The number of commit points to be kept -->
-      <!-- <str name="maxCommitsToKeep">1</str> -->
-      <!-- The number of optimized commit points to be kept -->
-      <!-- <str name="maxOptimizedCommitsToKeep">0</str> -->
-      <!--
-          Delete all commit points once they have reached the given age.
-          Supports DateMathParser syntax e.g.
-        -->
-      <!--
-         <str name="maxCommitAge">30MINUTES</str>
-         <str name="maxCommitAge">1DAY</str>
-      -->
-    <!-- 
-    </deletionPolicy>
-    -->
-
-    <!-- Lucene Infostream
-       
-         To aid in advanced debugging, Lucene provides an "InfoStream"
-         of detailed information when indexing.
-
-         Setting the value to true will instruct the underlying Lucene
-         IndexWriter to write its info stream to solr's log. By default,
-         this is enabled here, and controlled through log4j.properties.
-      -->
-     <infoStream>true</infoStream>
-  </indexConfig>
-
-
-  <!-- JMX
-       
-       This example enables JMX if and only if an existing MBeanServer
-       is found, use this if you want to configure JMX through JVM
-       parameters. Remove this to disable exposing Solr configuration
-       and statistics to JMX.
-
-       For more details see http://wiki.apache.org/solr/SolrJmx
-    -->
-  <jmx />
-  <!-- If you want to connect to a particular server, specify the
-       agentId 
-    -->
-  <!-- <jmx agentId="myAgent" /> -->
-  <!-- If you want to start a new MBeanServer, specify the serviceUrl -->
-  <!-- <jmx serviceUrl="service:jmx:rmi:///jndi/rmi://localhost:9999/solr"/>
-    -->
-
-  <!-- The default high-performance update handler -->
-  <updateHandler class="solr.DirectUpdateHandler2">
-
-    <!-- Enables a transaction log, used for real-time get, durability, and
-         and solr cloud replica recovery.  The log can grow as big as
-         uncommitted changes to the index, so use of a hard autoCommit
-         is recommended (see below).
-         "dir" - the target directory for transaction logs, defaults to the
-                solr data directory.  --> 
-    <!--
-    <updateLog>
-      <str name="dir">${solr.ulog.dir:}</str>
-    </updateLog>
-    -->
- 
-    <!-- AutoCommit
-
-         Perform a hard commit automatically under certain conditions.
-         Instead of enabling autoCommit, consider using "commitWithin"
-         when adding documents. 
-
-         http://wiki.apache.org/solr/UpdateXmlMessages
-
-         maxDocs - Maximum number of documents to add since the last
-                   commit before automatically triggering a new commit.
-
-         maxTime - Maximum amount of time in ms that is allowed to pass
-                   since a document was added before automatically
-                   triggering a new commit. 
-         openSearcher - if false, the commit causes recent index changes
-           to be flushed to stable storage, but does not cause a new
-           searcher to be opened to make those changes visible.
-
-         If the updateLog is enabled, then it's highly recommended to
-         have some sort of hard autoCommit to limit the log size.
-      -->
-     <autoCommit> 
-       <maxTime>${solr.autoCommit.maxTime:15000}</maxTime> 
-       <openSearcher>false</openSearcher> 
-     </autoCommit>
-
-    <!-- softAutoCommit is like autoCommit except it causes a
-         'soft' commit which only ensures that changes are visible
-         but does not ensure that data is synced to disk.  This is
-         faster and more near-realtime friendly than a hard commit.
-      -->
-
-     <autoSoftCommit> 
-       <maxTime>${solr.autoSoftCommit.maxTime:-1}</maxTime> 
-     </autoSoftCommit>
-
-    <!-- Update Related Event Listeners
-         
-         Various IndexWriter related events can trigger Listeners to
-         take actions.
-
-         postCommit - fired after every commit or optimize command
-         postOptimize - fired after every optimize command
-      -->
-    <!-- The RunExecutableListener executes an external command from a
-         hook such as postCommit or postOptimize.
-         
-         exe - the name of the executable to run
-         dir - dir to use as the current working directory. (default=".")
-         wait - the calling thread waits until the executable returns. 
-                (default="true")
-         args - the arguments to pass to the program.  (default is none)
-         env - environment variables to set.  (default is none)
-      -->
-    <!-- This example shows how RunExecutableListener could be used
-         with the script based replication...
-         http://wiki.apache.org/solr/CollectionDistribution
-      -->
-    <!--
-       <listener event="postCommit" class="solr.RunExecutableListener">
-         <str name="exe">solr/bin/snapshooter</str>
-         <str name="dir">.</str>
-         <bool name="wait">true</bool>
-         <arr name="args"> <str>arg1</str> <str>arg2</str> </arr>
-         <arr name="env"> <str>MYVAR=val1</str> </arr>
-       </listener>
-      -->
-
-  </updateHandler>
-  
-  <!-- IndexReaderFactory
-
-       Use the following format to specify a custom IndexReaderFactory,
-       which allows for alternate IndexReader implementations.
-
-       ** Experimental Feature **
-
-       Please note - Using a custom IndexReaderFactory may prevent
-       certain other features from working. The API to
-       IndexReaderFactory may change without warning or may even be
-       removed from future releases if the problems cannot be
-       resolved.
-
-
-       ** Features that may not work with custom IndexReaderFactory **
-
-       The ReplicationHandler assumes a disk-resident index. Using a
-       custom IndexReader implementation may cause incompatibility
-       with ReplicationHandler and may cause replication to not work
-       correctly. See SOLR-1366 for details.
-
-    -->
-  <!--
-  <indexReaderFactory name="IndexReaderFactory" class="package.class">
-    <str name="someArg">Some Value</str>
-  </indexReaderFactory >
-  -->
-
-  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-       Query section - these settings control query time things like caches
-       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
-  <query>
-    <!-- Max Boolean Clauses
-
-         Maximum number of clauses in each BooleanQuery,  an exception
-         is thrown if exceeded.
-
-         ** WARNING **
-         
-         This option actually modifies a global Lucene property that
-         will affect all SolrCores.  If multiple solrconfig.xml files
-         disagree on this property, the value at any given moment will
-         be based on the last SolrCore to be initialized.
-         
-      -->
-    <maxBooleanClauses>1024</maxBooleanClauses>
-
-
-    <!-- Solr Internal Query Caches
-
-         There are two implementations of cache available for Solr,
-         LRUCache, based on a synchronized LinkedHashMap, and
-         FastLRUCache, based on a ConcurrentHashMap.  
-
-         FastLRUCache has faster gets and slower puts in single
-         threaded operation and thus is generally faster than LRUCache
-         when the hit ratio of the cache is high (> 75%), and may be
-         faster under other scenarios on multi-cpu systems.
-    -->
-
-    <!-- Filter Cache
-
-         Cache used by SolrIndexSearcher for filters (DocSets),
-         unordered sets of *all* documents that match a query.  When a
-         new searcher is opened, its caches may be prepopulated or
-         "autowarmed" using data from caches in the old searcher.
-         autowarmCount is the number of items to prepopulate.  For
-         LRUCache, the autowarmed items will be the most recently
-         accessed items.
-
-         Parameters:
-           class - the SolrCache implementation LRUCache or
-               (LRUCache or FastLRUCache)
-           size - the maximum number of entries in the cache
-           initialSize - the initial capacity (number of entries) of
-               the cache.  (see java.util.HashMap)
-           autowarmCount - the number of entries to prepopulate from
-               and old cache.  
-      -->
-    <filterCache class="solr.FastLRUCache"
-                 size="512"
-                 initialSize="512"
-                 autowarmCount="0"/>
-
-    <!-- Query Result Cache
-         
-         Caches results of searches - ordered lists of document ids
-         (DocList) based on a query, a sort, and the range of documents requested.  
-      -->
-    <queryResultCache class="solr.LRUCache"
-                     size="512"
-                     initialSize="512"
-                     autowarmCount="0"/>
-   
-    <!-- Document Cache
-
-         Caches Lucene Document objects (the stored fields for each
-         document).  Since Lucene internal document ids are transient,
-         this cache will not be autowarmed.  
-      -->
-    <documentCache class="solr.LRUCache"
-                   size="512"
-                   initialSize="512"
-                   autowarmCount="0"/>
-    
-    <!-- custom cache currently used by block join --> 
-    <cache name="perSegFilter"
-      class="solr.search.LRUCache"
-      size="10"
-      initialSize="0"
-      autowarmCount="10"
-      regenerator="solr.NoOpRegenerator" />
-
-    <!-- Field Value Cache
-         
-         Cache used to hold field values that are quickly accessible
-         by document id.  The fieldValueCache is created by default
-         even if not configured here.
-      -->
-    <!--
-       <fieldValueCache class="solr.FastLRUCache"
-                        size="512"
-                        autowarmCount="128"
-                        showItems="32" />
-      -->
-
-    <!-- Custom Cache
-
-         Example of a generic cache.  These caches may be accessed by
-         name through SolrIndexSearcher.getCache(),cacheLookup(), and
-         cacheInsert().  The purpose is to enable easy caching of
-         user/application level data.  The regenerator argument should
-         be specified as an implementation of solr.CacheRegenerator 
-         if autowarming is desired.  
-      -->
-    <!--
-       <cache name="myUserCache"
-              class="solr.LRUCache"
-              size="4096"
-              initialSize="1024"
-              autowarmCount="1024"
-              regenerator="com.mycompany.MyRegenerator"
-              />
-      -->
-
-
-    <!-- Lazy Field Loading
-
-         If true, stored fields that are not requested will be loaded
-         lazily.  This can result in a significant speed improvement
-         if the usual case is to not load all stored fields,
-         especially if the skipped fields are large compressed text
-         fields.
-    -->
-    <enableLazyFieldLoading>true</enableLazyFieldLoading>
-
-   <!-- Use Filter For Sorted Query
-
-        A possible optimization that attempts to use a filter to
-        satisfy a search.  If the requested sort does not include
-        score, then the filterCache will be checked for a filter
-        matching the query. If found, the filter will be used as the
-        source of document ids, and then the sort will be applied to
-        that.
-
-        For most situations, this will not be useful unless you
-        frequently get the same search repeatedly with different sort
-        options, and none of them ever use "score"
-     -->
-   <!--
-      <useFilterForSortedQuery>true</useFilterForSortedQuery>
-     -->
-
-   <!-- Result Window Size
-
-        An optimization for use with the queryResultCache.  When a search
-        is requested, a superset of the requested number of document ids
-        are collected.  For example, if a search for a particular query
-        requests matching documents 10 through 19, and queryWindowSize is 50,
-        then documents 0 through 49 will be collected and cached.  Any further
-        requests in that range can be satisfied via the cache.  
-     -->
-   <queryResultWindowSize>20</queryResultWindowSize>
-
-   <!-- Maximum number of documents to cache for any entry in the
-        queryResultCache. 
-     -->
-   <queryResultMaxDocsCached>200</queryResultMaxDocsCached>
-
-   <!-- Query Related Event Listeners
-
-        Various IndexSearcher related events can trigger Listeners to
-        take actions.
-
-        newSearcher - fired whenever a new searcher is being prepared
-        and there is a current searcher handling requests (aka
-        registered).  It can be used to prime certain caches to
-        prevent long request times for certain requests.
-
-        firstSearcher - fired whenever a new searcher is being
-        prepared but there is no current registered searcher to handle
-        requests or to gain autowarming data from.
-
-        
-     -->
-    <!-- QuerySenderListener takes an array of NamedList and executes a
-         local query request for each NamedList in sequence. 
-      -->
-    <listener event="newSearcher" class="solr.QuerySenderListener">
-      <arr name="queries">
-        <!--
-           <lst><str name="q">solr</str><str name="sort">price asc</str></lst>
-           <lst><str name="q">rocks</str><str name="sort">weight asc</str></lst>
-          -->
-      </arr>
-    </listener>
-    <listener event="firstSearcher" class="solr.QuerySenderListener">
-      <arr name="queries">
-        <lst>
-          <str name="q">static firstSearcher warming in solrconfig.xml</str>
-        </lst>
-      </arr>
-    </listener>
-
-    <!-- Use Cold Searcher
-
-         If a search request comes in and there is no current
-         registered searcher, then immediately register the still
-         warming searcher and use it.  If "false" then all requests
-         will block until the first searcher is done warming.
-      -->
-    <useColdSearcher>false</useColdSearcher>
-
-  </query>
-
-
-  <!-- Request Dispatcher
-
-       This section contains instructions for how the SolrDispatchFilter
-       should behave when processing requests for this SolrCore.
-
-       handleSelect is a legacy option that affects the behavior of requests
-       such as /select?qt=XXX
-
-       handleSelect="true" will cause the SolrDispatchFilter to process
-       the request and dispatch the query to a handler specified by the 
-       "qt" param, assuming "/select" isn't already registered.
-
-       handleSelect="false" will cause the SolrDispatchFilter to
-       ignore "/select" requests, resulting in a 404 unless a handler
-       is explicitly registered with the name "/select"
-
-       handleSelect="true" is not recommended for new users, but is the default
-       for backwards compatibility
-    -->
-  <requestDispatcher handleSelect="false" >
-    <!-- Request Parsing
-
-         These settings indicate how Solr Requests may be parsed, and
-         what restrictions may be placed on the ContentStreams from
-         those requests
-
-         enableRemoteStreaming - enables use of the stream.file
-         and stream.url parameters for specifying remote streams.
-
-         multipartUploadLimitInKB - specifies the max size (in KiB) of
-         Multipart File Uploads that Solr will allow in a Request.
-         
-         formdataUploadLimitInKB - specifies the max size (in KiB) of
-         form data (application/x-www-form-urlencoded) sent via
-         POST. You can use POST to pass request parameters not
-         fitting into the URL.
-         
-         addHttpRequestToContext - if set to true, it will instruct
-         the requestParsers to include the original HttpServletRequest
-         object in the context map of the SolrQueryRequest under the 
-         key "httpRequest". It will not be used by any of the existing
-         Solr components, but may be useful when developing custom 
-         plugins.
-         
-         *** WARNING ***
-         The settings below authorize Solr to fetch remote files, You
-         should make sure your system has some authentication before
-         using enableRemoteStreaming="true"
-
-      --> 
-    <requestParsers enableRemoteStreaming="true" 
-                    multipartUploadLimitInKB="2048000"
-                    formdataUploadLimitInKB="2048"
-                    addHttpRequestToContext="false"/>
-
-    <!-- HTTP Caching
-
-         Set HTTP caching related parameters (for proxy caches and clients).
-
-         The options below instruct Solr not to output any HTTP Caching
-         related headers
-      -->
-    <httpCaching never304="true" />
-    <!-- If you include a <cacheControl> directive, it will be used to
-         generate a Cache-Control header (as well as an Expires header
-         if the value contains "max-age=")
-         
-         By default, no Cache-Control header is generated.
-         
-         You can use the <cacheControl> option even if you have set
-         never304="true"
-      -->
-    <!--
-       <httpCaching never304="true" >
-         <cacheControl>max-age=30, public</cacheControl> 
-       </httpCaching>
-      -->
-    <!-- To enable Solr to respond with automatically generated HTTP
-         Caching headers, and to response to Cache Validation requests
-         correctly, set the value of never304="false"
-         
-         This will cause Solr to generate Last-Modified and ETag
-         headers based on the properties of the Index.
-
-         The following options can also be specified to affect the
-         values of these headers...
+  <!-- Load Data Import Handler and Apache Tika (extraction) libraries -->
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-dataimporthandler-.*\.jar"/>
+  <lib dir="${solr.install.dir:../../../..}/contrib/extraction/lib" regex=".*\.jar"/>
 
-         lastModFrom - the default value is "openTime" which means the
-         Last-Modified value (and validation against If-Modified-Since
-         requests) will all be relative to when the current Searcher
-         was opened.  You can change it to lastModFrom="dirLastMod" if
-         you want the value to exactly correspond to when the physical
-         index was last modified.
-
-         etagSeed="..." is an option you can change to force the ETag
-         header (and validation against If-None-Match requests) to be
-         different even if the index has not changed (ie: when making
-         significant changes to your config file)
-
-         (lastModifiedFrom and etagSeed are both ignored if you use
-         the never304="true" option)
-      -->
-    <!--
-       <httpCaching lastModifiedFrom="openTime"
-                    etagSeed="Solr">
-         <cacheControl>max-age=30, public</cacheControl> 
-       </httpCaching>
-      -->
-  </requestDispatcher>
-
-  <!-- Request Handlers 
-
-       http://wiki.apache.org/solr/SolrRequestHandler
-
-       Incoming queries will be dispatched to a specific handler by name
-       based on the path specified in the request.
-
-       Legacy behavior: If the request path uses "/select" but no Request
-       Handler has that name, and if handleSelect="true" has been specified in
-       the requestDispatcher, then the Request Handler is dispatched based on
-       the qt parameter.  Handlers without a leading '/' are accessed this way
-       like so: http://host/app/[core/]select?qt=name  If no qt is
-       given, then the requestHandler that declares default="true" will be
-       used or the one named "standard".
-
-       If a Request Handler is declared with startup="lazy", then it will
-       not be initialized until the first request that uses it.
-
-    -->
-
-  <requestHandler name="/dataimport" class="solr.DataImportHandler">
-    <lst name="defaults">
-      <str name="config">tika-data-config.xml</str>
-    </lst>
-  </requestHandler>
-
-  <!-- SearchHandler
-
-       http://wiki.apache.org/solr/SearchHandler
-
-       For processing Search Queries, the primary Request Handler
-       provided with Solr is "SearchHandler" It delegates to a sequent
-       of SearchComponents (see below) and supports distributed
-       queries across multiple shards
-    -->
   <requestHandler name="/select" class="solr.SearchHandler">
-    <!-- default values for query parameters can be specified, these
-         will be overridden by parameters in the request
-      -->
-     <lst name="defaults">
-       <str name="echoParams">explicit</str>
-       <int name="rows">10</int>
-       <str name="df">text</str>
-     </lst>
-    <!-- In addition to defaults, "appends" params can be specified
-         to identify values which should be appended to the list of
-         multi-val params from the query (or the existing "defaults").
-      -->
-    <!-- In this example, the param "fq=instock:true" would be appended to
-         any query time fq params the user may specify, as a mechanism for
-         partitioning the index, independent of any user selected filtering
-         that may also be desired (perhaps as a result of faceted searching).
-
-         NOTE: there is *absolutely* nothing a client can do to prevent these
-         "appends" values from being used, so don't use this mechanism
-         unless you are sure you always want it.
-      -->
-    <!--
-       <lst name="appends">
-         <str name="fq">inStock:true</str>
-       </lst>
-      -->
-    <!-- "invariants" are a way of letting the Solr maintainer lock down
-         the options available to Solr clients.  Any params values
-         specified here are used regardless of what values may be specified
-         in either the query, the "defaults", or the "appends" params.
-
-         In this example, the facet.field and facet.query params would
-         be fixed, limiting the facets clients can use.  Faceting is
-         not turned on by default - but if the client does specify
-         facet=true in the request, these are the only facets they
-         will be able to see counts for; regardless of what other
-         facet.field or facet.query params they may specify.
-
-         NOTE: there is *absolutely* nothing a client can do to prevent these
-         "invariants" values from being used, so don't use this mechanism
-         unless you are sure you always want it.
-      -->
-    <!--
-       <lst name="invariants">
-         <str name="facet.field">cat</str>
-         <str name="facet.field">manu_exact</str>
-         <str name="facet.query">price:[* TO 500]</str>
-         <str name="facet.query">price:[500 TO *]</str>
-       </lst>
-      -->
-    <!-- If the default list of SearchComponents is not desired, that
-         list can either be overridden completely, or components can be
-         prepended or appended to the default list.  (see below)
-      -->
-    <!--
-       <arr name="components">
-         <str>nameOfCustomComponent1</str>
-         <str>nameOfCustomComponent2</str>
-       </arr>
-      -->
-    </requestHandler>
-
-  <!-- A request handler that returns indented JSON by default -->
-  <requestHandler name="/query" class="solr.SearchHandler">
-     <lst name="defaults">
-       <str name="echoParams">explicit</str>
-       <str name="wt">json</str>
-       <str name="indent">true</str>
-       <str name="df">text</str>
-     </lst>
-  </requestHandler>
-
-  <!-- A Robust Example
-       
-       This example SearchHandler declaration shows off usage of the
-       SearchHandler with many defaults declared
-
-       Note that multiple instances of the same Request Handler
-       (SearchHandler) can be registered multiple times with different
-       names (and different init parameters)
-    -->
-  <requestHandler name="/browse" class="solr.SearchHandler">
     <lst name="defaults">
       <str name="echoParams">explicit</str>
-
-      <!-- VelocityResponseWriter settings -->
-      <str name="wt">velocity</str>
-      <str name="v.template">browse</str>
-      <str name="v.layout">layout</str>
-
-      <!-- Query settings -->
-      <str name="defType">edismax</str>
-      <str name="q.alt">*:*</str>
-      <str name="rows">10</str>
-      <str name="fl">*,score</str>
-
-      <!-- Faceting defaults -->
-      <str name="facet">on</str>
-      <str name="facet.mincount">1</str>
-    </lst>
-  </requestHandler>
-
-  <initParams path="/update/**,/query,/select,/tvrh,/elevate,/spell,/browse">
-    <lst name="defaults">
-      <str name="df">text</str>
-    </lst>
-  </initParams>
-
-  <!-- Solr Cell Update Request Handler
-
-       http://wiki.apache.org/solr/ExtractingRequestHandler 
-
-    -->
-  <requestHandler name="/update/extract" 
-                  startup="lazy"
-                  class="solr.extraction.ExtractingRequestHandler" >
-    <lst name="defaults">
-      <str name="lowernames">true</str>
-      <str name="uprefix">ignored_</str>
-
-      <!-- capture link hrefs but ignore div attributes -->
-      <str name="captureAttr">true</str>
-      <str name="fmap.a">links</str>
-      <str name="fmap.div">ignored_</str>
-    </lst>
-  </requestHandler>
-  <!-- Search Components
-
-       Search components are registered to SolrCore and used by 
-       instances of SearchHandler (which can access them by name)
-       
-       By default, the following components are available:
-       
-       <searchComponent name="query"     class="solr.QueryComponent" />
-       <searchComponent name="facet"     class="solr.FacetComponent" />
-       <searchComponent name="mlt"       class="solr.MoreLikeThisComponent" />
-       <searchComponent name="highlight" class="solr.HighlightComponent" />
-       <searchComponent name="stats"     class="solr.StatsComponent" />
-       <searchComponent name="debug"     class="solr.DebugComponent" />
-   
-       Default configuration in a requestHandler would look like:
-
-       <arr name="components">
-         <str>query</str>
-         <str>facet</str>
-         <str>mlt</str>
-         <str>highlight</str>
-         <str>stats</str>
-         <str>debug</str>
-       </arr>
-
-       If you register a searchComponent to one of the standard names, 
-       that will be used instead of the default.
-
-       To insert components before or after the 'standard' components, use:
-    
-       <arr name="first-components">
-         <str>myFirstComponentName</str>
-       </arr>
-    
-       <arr name="last-components">
-         <str>myLastComponentName</str>
-       </arr>
-
-       NOTE: The component registered with the name "debug" will
-       always be executed after the "last-components" 
-       
-     -->
-  
-   <!-- Spell Check
-
-        The spell check component can return a list of alternative spelling
-        suggestions.  
-
-        http://wiki.apache.org/solr/SpellCheckComponent
-     -->
-  <searchComponent name="spellcheck" class="solr.SpellCheckComponent">
-
-    <str name="queryAnalyzerFieldType">text_general</str>
-
-    <!-- Multiple "Spell Checkers" can be declared and used by this
-         component
-      -->
-
-    <!-- a spellchecker built from a field of the main index -->
-    <lst name="spellchecker">
-      <str name="name">default</str>
-      <str name="field">text</str>
-      <str name="classname">solr.DirectSolrSpellChecker</str>
-      <!-- the spellcheck distance measure used, the default is the internal levenshtein -->
-      <str name="distanceMeasure">internal</str>
-      <!-- minimum accuracy needed to be considered a valid spellcheck suggestion -->
-      <float name="accuracy">0.5</float>
-      <!-- the maximum #edits we consider when enumerating terms: can be 1 or 2 -->
-      <int name="maxEdits">2</int>
-      <!-- the minimum shared prefix when enumerating terms -->
-      <int name="minPrefix">1</int>
-      <!-- maximum number of inspections per result. -->
-      <int name="maxInspections">5</int>
-      <!-- minimum length of a query term to be considered for correction -->
-      <int name="minQueryLength">4</int>
-      <!-- maximum threshold of documents a query term can appear to be considered for correction -->
-      <float name="maxQueryFrequency">0.01</float>
-      <!-- uncomment this to require suggestions to occur in 1% of the documents
-        <float name="thresholdTokenFrequency">.01</float>
-      -->
-    </lst>
-    
-    <!-- a spellchecker that can break or combine words.  See "/spell" handler below for usage -->
-    <lst name="spellchecker">
-      <str name="name">wordbreak</str>
-      <str name="classname">solr.WordBreakSolrSpellChecker</str>      
-      <str name="field">name</str>
-      <str name="combineWords">true</str>
-      <str name="breakWords">true</str>
-      <int name="maxChanges">10</int>
-    </lst>
-
-    <!-- a spellchecker that uses a different distance measure -->
-    <!--
-       <lst name="spellchecker">
-         <str name="name">jarowinkler</str>
-         <str name="field">spell</str>
-         <str name="classname">solr.DirectSolrSpellChecker</str>
-         <str name="distanceMeasure">
-           org.apache.lucene.search.spell.JaroWinklerDistance
-         </str>
-       </lst>
-     -->
-
-    <!-- a spellchecker that use an alternate comparator 
-
-         comparatorClass be one of:
-          1. score (default)
-          2. freq (Frequency first, then score)
-          3. A fully qualified class name
-      -->
-    <!--
-       <lst name="spellchecker">
-         <str name="name">freq</str>
-         <str name="field">lowerfilt</str>
-         <str name="classname">solr.DirectSolrSpellChecker</str>
-         <str name="comparatorClass">freq</str>
-      -->
-
-    <!-- A spellchecker that reads the list of words from a file -->
-    <!--
-       <lst name="spellchecker">
-         <str name="classname">solr.FileBasedSpellChecker</str>
-         <str name="name">file</str>
-         <str name="sourceLocation">spellings.txt</str>
-         <str name="characterEncoding">UTF-8</str>
-         <str name="spellcheckIndexDir">spellcheckerFile</str>
-       </lst>
-      -->
-  </searchComponent>
-  
-  <!-- A request handler for demonstrating the spellcheck component.  
-
-       NOTE: This is purely as an example.  The whole purpose of the
-       SpellCheckComponent is to hook it into the request handler that
-       handles your normal user queries so that a separate request is
-       not needed to get suggestions.
-
-       IN OTHER WORDS, THERE IS REALLY GOOD CHANCE THE SETUP BELOW IS
-       NOT WHAT YOU WANT FOR YOUR PRODUCTION SYSTEM!
-       
-       See http://wiki.apache.org/solr/SpellCheckComponent for details
-       on the request parameters.
-    -->
-  <requestHandler name="/spell" class="solr.SearchHandler" startup="lazy">
-    <lst name="defaults">
       <str name="df">text</str>
-      <!-- Solr will use suggestions from both the 'default' spellchecker
-           and from the 'wordbreak' spellchecker and combine them.
-           collations (re-written queries) can include a combination of
-           corrections from both spellcheckers -->
-      <str name="spellcheck.dictionary">default</str>
-      <str name="spellcheck.dictionary">wordbreak</str>
-      <str name="spellcheck">on</str>
-      <str name="spellcheck.extendedResults">true</str>       
-      <str name="spellcheck.count">10</str>
-      <str name="spellcheck.alternativeTermCount">5</str>
-      <str name="spellcheck.maxResultsForSuggest">5</str>       
-      <str name="spellcheck.collate">true</str>
-      <str name="spellcheck.collateExtendedResults">true</str>  
-      <str name="spellcheck.maxCollationTries">10</str>
-      <str name="spellcheck.maxCollations">5</str>         
-    </lst>
-    <arr name="last-components">
-      <str>spellcheck</str>
-    </arr>
-  </requestHandler>
-
-  <searchComponent name="suggest" class="solr.SuggestComponent">
-    <lst name="suggester">
-      <str name="name">mySuggester</str>
-      <str name="lookupImpl">FuzzyLookupFactory</str>      <!-- org.apache.solr.spelling.suggest.fst -->
-      <str name="dictionaryImpl">DocumentDictionaryFactory</str>     <!-- org.apache.solr.spelling.suggest.HighFrequencyDictionaryFactory --> 
-      <str name="field">cat</str>
-      <str name="weightField">price</str>
-      <str name="suggestAnalyzerFieldType">string</str>
     </lst>
-  </searchComponent>
-
-  <requestHandler name="/suggest" class="solr.SearchHandler" startup="lazy">
-    <lst name="defaults">
-      <str name="suggest">true</str>
-      <str name="suggest.count">10</str>
-    </lst>
-    <arr name="components">
-      <str>suggest</str>
-    </arr>
   </requestHandler>
-  <!-- Term Vector Component
-
-       http://wiki.apache.org/solr/TermVectorComponent
-    -->
-  <searchComponent name="tvComponent" class="solr.TermVectorComponent"/>
-
-  <!-- A request handler for demonstrating the term vector component
 
-       This is purely as an example.
-
-       In reality you will likely want to add the component to your 
-       already specified request handlers. 
-    -->
-  <requestHandler name="/tvrh" class="solr.SearchHandler" startup="lazy">
+  <requestHandler name="/dataimport" class="solr.DataImportHandler">
     <lst name="defaults">
-      <str name="df">text</str>
-      <bool name="tv">true</bool>
+      <str name="config">tika-data-config.xml</str>
     </lst>
-    <arr name="last-components">
-      <str>tvComponent</str>
-    </arr>
   </requestHandler>
 
-  <!-- Terms Component
-
-       http://wiki.apache.org/solr/TermsComponent
-
-       A component to return terms and document frequency of those
-       terms
-    -->
-  <searchComponent name="terms" class="solr.TermsComponent"/>
-
-  <!-- A request handler for demonstrating the terms component -->
-  <requestHandler name="/terms" class="solr.SearchHandler" startup="lazy">
-     <lst name="defaults">
-      <bool name="terms">true</bool>
-      <bool name="distrib">false</bool>
-    </lst>     
-    <arr name="components">
-      <str>terms</str>
-    </arr>
-  </requestHandler>
-
-
-  <!-- Highlighting Component
-
-       http://wiki.apache.org/solr/HighlightingParameters
-    -->
-  <searchComponent class="solr.HighlightComponent" name="highlight">
-    <highlighting>
-      <!-- Configure the standard fragmenter -->
-      <!-- This could most likely be commented out in the "default" case -->
-      <fragmenter name="gap" 
-                  default="true"
-                  class="solr.highlight.GapFragmenter">
-        <lst name="defaults">
-          <int name="hl.fragsize">100</int>
-        </lst>
-      </fragmenter>
-
-      <!-- A regular-expression-based fragmenter 
-           (for sentence extraction) 
-        -->
-      <fragmenter name="regex" 
-                  class="solr.highlight.RegexFragmenter">
-        <lst name="defaults">
-          <!-- slightly smaller fragsizes work better because of slop -->
-          <int name="hl.fragsize">70</int>
-          <!-- allow 50% slop on fragment sizes -->
-          <float name="hl.regex.slop">0.5</float>
-          <!-- a basic sentence pattern -->
-          <str name="hl.regex.pattern">[-\w ,/\n\&quot;&apos;]{20,200}</str>
-        </lst>
-      </fragmenter>
-
-      <!-- Configure the standard formatter -->
-      <formatter name="html" 
-                 default="true"
-                 class="solr.highlight.HtmlFormatter">
-        <lst name="defaults">
-          <str name="hl.simple.pre"><![CDATA[<em>]]></str>
-          <str name="hl.simple.post"><![CDATA[</em>]]></str>
-        </lst>
-      </formatter>
-
-      <!-- Configure the standard encoder -->
-      <encoder name="html" 
-               class="solr.highlight.HtmlEncoder" />
-
-      <!-- Configure the standard fragListBuilder -->
-      <fragListBuilder name="simple" 
-                       class="solr.highlight.SimpleFragListBuilder"/>
-      
-      <!-- Configure the single fragListBuilder -->
-      <fragListBuilder name="single" 
-                       class="solr.highlight.SingleFragListBuilder"/>
-      
-      <!-- Configure the weighted fragListBuilder -->
-      <fragListBuilder name="weighted" 
-                       default="true"
-                       class="solr.highlight.WeightedFragListBuilder"/>
-      
-      <!-- default tag FragmentsBuilder -->
-      <fragmentsBuilder name="default" 
-                        default="true"
-                        class="solr.highlight.ScoreOrderFragmentsBuilder">
-        <!-- 
-        <lst name="defaults">
-          <str name="hl.multiValuedSeparatorChar">/</str>
-        </lst>
-        -->
-      </fragmentsBuilder>
-
-      <!-- multi-colored tag FragmentsBuilder -->
-      <fragmentsBuilder name="colored" 
-                        class="solr.highlight.ScoreOrderFragmentsBuilder">
-        <lst name="defaults">
-          <str name="hl.tag.pre"><![CDATA[
-               <b style="background:yellow">,<b style="background:lawgreen">,
-               <b style="background:aquamarine">,<b style="background:magenta">,
-               <b style="background:palegreen">,<b style="background:coral">,
-               <b style="background:wheat">,<b style="background:khaki">,
-               <b style="background:lime">,<b style="background:deepskyblue">]]></str>
-          <str name="hl.tag.post"><![CDATA[</b>]]></str>
-        </lst>
-      </fragmentsBuilder>
-      
-      <boundaryScanner name="default" 
-                       default="true"
-                       class="solr.highlight.SimpleBoundaryScanner">
-        <lst name="defaults">
-          <str name="hl.bs.maxScan">10</str>
-          <str name="hl.bs.chars">.,!? &#9;&#10;&#13;</str>
-        </lst>
-      </boundaryScanner>
-      
-      <boundaryScanner name="breakIterator" 
-                       class="solr.highlight.BreakIteratorBoundaryScanner">
-        <lst name="defaults">
-          <!-- type should be one of CHARACTER, WORD(default), LINE and SENTENCE -->
-          <str name="hl.bs.type">WORD</str>
-          <!-- language and country are used when constructing Locale object.  -->
-          <!-- And the Locale object will be used when getting instance of BreakIterator -->
-          <str name="hl.bs.language">en</str>
-          <str name="hl.bs.country">US</str>
-        </lst>
-      </boundaryScanner>
-    </highlighting>
-  </searchComponent>
-
-  <!-- Update Processors
-
-       Chains of Update Processor Factories for dealing with Update
-       Requests can be declared, and then used by name in Update
-       Request Processors
-
-       http://wiki.apache.org/solr/UpdateRequestProcessor
-
-    --> 
-  <!-- Deduplication
-
-       An example dedup update processor that creates the "id" field
-       on the fly based on the hash code of some other fields.  This
-       example has overwriteDupes set to false since we are using the
-       id field as the signatureField and Solr will maintain
-       uniqueness based on that anyway.  
-       
-    -->
-  <!--
-     <updateRequestProcessorChain name="dedupe">
-       <processor class="solr.processor.SignatureUpdateProcessorFactory">
-         <bool name="enabled">true</bool>
-         <str name="signatureField">id</str>
-         <bool name="overwriteDupes">false</bool>
-         <str name="fields">name,features,cat</str>
-         <str name="signatureClass">solr.processor.Lookup3Signature</str>
-       </processor>
-       <processor class="solr.LogUpdateProcessorFactory" />
-       <processor class="solr.RunUpdateProcessorFactory" />
-     </updateRequestProcessorChain>
-    -->
-  
-  <!-- Language identification
-
-       This example update chain identifies the language of the incoming
-       documents using the langid contrib. The detected language is
-       written to field language_s. No field name mapping is done.
-       The fields used for detection are text, title, subject and description,
-       making this example suitable for detecting languages from full-text
-       rich documents injected via ExtractingRequestHandler.
-       See more about langId at http://wiki.apache.org/solr/LanguageDetection
-    -->
-    <!--
-     <updateRequestProcessorChain name="langid">
-       <processor class="org.apache.solr.update.processor.TikaLanguageIdentifierUpdateProcessorFactory">
-         <str name="langid.fl">text,title,subject,description</str>
-         <str name="langid.langField">language_s</str>
-         <str name="langid.fallback">en</str>
-       </processor>
-       <processor class="solr.LogUpdateProcessorFactory" />
-       <processor class="solr.RunUpdateProcessorFactory" />
-     </updateRequestProcessorChain>
-    -->
-
-  <!-- Script update processor
-
-    This example hooks in an update processor implemented using JavaScript.
-
-    See more about the script update processor at http://wiki.apache.org/solr/ScriptUpdateProcessor
-  -->
-  <!--
-    <updateRequestProcessorChain name="script">
-      <processor class="solr.StatelessScriptUpdateProcessorFactory">
-        <str name="script">update-script.js</str>
-        <lst name="params">
-          <str name="config_param">example config parameter</str>
-        </lst>
-      </processor>
-      <processor class="solr.RunUpdateProcessorFactory" />
-    </updateRequestProcessorChain>
-  -->
- 
-  <!-- Response Writers
-
-       http://wiki.apache.org/solr/QueryResponseWriter
-
-       Request responses will be written using the writer specified by
-       the 'wt' request parameter matching the name of a registered
-       writer.
-
-       The "default" writer is the default and will be used if 'wt' is
-       not specified in the request.
-    -->
-  <!-- The following response writers are implicitly configured unless
-       overridden...
-    -->
-  <!--
-     <queryResponseWriter name="xml" 
-                          default="true"
-                          class="solr.XMLResponseWriter" />
-     <queryResponseWriter name="json" class="solr.JSONResponseWriter"/>
-     <queryResponseWriter name="python" class="solr.PythonResponseWriter"/>
-     <queryResponseWriter name="ruby" class="solr.RubyResponseWriter"/>
-     <queryResponseWriter name="php" class="solr.PHPResponseWriter"/>
-     <queryResponseWriter name="phps" class="solr.PHPSerializedResponseWriter"/>
-     <queryResponseWriter name="csv" class="solr.CSVResponseWriter"/>
-     <queryResponseWriter name="schema.xml" class="solr.SchemaXmlResponseWriter"/>
-    -->
-
-  <queryResponseWriter name="json" class="solr.JSONResponseWriter">
-     <!-- For the purposes of the tutorial, JSON responses are written as
-      plain text so that they are easy to read in *any* browser.
-      If you expect a MIME type of "application/json" just remove this override.
-     -->
-    <str name="content-type">text/plain; charset=UTF-8</str>
-  </queryResponseWriter>
-  
-  <!--
-     Custom response writers can be declared as needed...
-    -->
-  <queryResponseWriter name="velocity" class="solr.VelocityResponseWriter" startup="lazy">
-    <str name="template.base.dir">${velocity.template.base.dir:}</str>
-  </queryResponseWriter>
-
-  <!-- XSLT response writer transforms the XML output by any xslt file found
-       in Solr's conf/xslt directory.  Changes to xslt files are checked for
-       every xsltCacheLifetimeSeconds.  
-    -->
-  <queryResponseWriter name="xslt" class="solr.XSLTResponseWriter">
-    <int name="xsltCacheLifetimeSeconds">5</int>
-  </queryResponseWriter>
-
-  <!-- Query Parsers
-
-       https://cwiki.apache.org/confluence/display/solr/Query+Syntax+and+Parsing
-
-       Multiple QParserPlugins can be registered by name, and then
-       used in either the "defType" param for the QueryComponent (used
-       by SearchHandler) or in LocalParams
-    -->
-  <!-- example of registering a query parser -->
-  <!--
-     <queryParser name="myparser" class="com.mycompany.MyQParserPlugin"/>
-    -->
-
-  <!-- Function Parsers
-
-       http://wiki.apache.org/solr/FunctionQuery
-
-       Multiple ValueSourceParsers can be registered by name, and then
-       used as function names when using the "func" QParser.
-    -->
-  <!-- example of registering a custom function parser  -->
-  <!--
-     <valueSourceParser name="myfunc" 
-                        class="com.mycompany.MyValueSourceParser" />
-    -->
-    
-  
-  <!-- Document Transformers
-       http://wiki.apache.org/solr/DocTransformers
-    -->
-  <!--
-     Could be something like:
-     <transformer name="db" class="com.mycompany.LoadFromDatabaseTransformer" >
-       <int name="connection">jdbc://....</int>
-     </transformer>
-     
-     To add a constant value to all docs, use:
-     <transformer name="mytrans2" class="org.apache.solr.response.transform.ValueAugmenterFactory" >
-       <int name="value">5</int>
-     </transformer>
-     
-     If you want the user to still be able to change it with _value:something_ use this:
-     <transformer name="mytrans3" class="org.apache.solr.response.transform.ValueAugmenterFactory" >
-       <double name="defaultValue">5</double>
-     </transformer>
-
-      If you are using the QueryElevationComponent, you may wish to mark documents that get boosted.  The
-      EditorialMarkerFactory will do exactly that:
-     <transformer name="qecBooster" class="org.apache.solr.response.transform.EditorialMarkerFactory" />
-    -->
 </config>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b02626de/solr/example/example-DIH/solr/tika/conf/tika-data-config.xml
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/tika/conf/tika-data-config.xml b/solr/example/example-DIH/solr/tika/conf/tika-data-config.xml
index 0d9e768..5286fc4 100644
--- a/solr/example/example-DIH/solr/tika/conf/tika-data-config.xml
+++ b/solr/example/example-DIH/solr/tika/conf/tika-data-config.xml
@@ -1,11 +1,26 @@
 <dataConfig>
-    <dataSource type="BinFileDataSource" />
-    <document>
-        <entity name="tika-test" processor="TikaEntityProcessor"
-                url="${solr.install.dir}/example/exampledocs/solr-word.pdf" format="text">
-                <field column="Author" name="author" meta="true"/>
-                <field column="title" name="title" meta="true"/>
-                <field column="text" name="text"/>
-        </entity>
-    </document>
+  <dataSource type="BinFileDataSource"/>
+  <document>
+    <entity name="file" processor="FileListEntityProcessor" dataSource="null"
+            baseDir="${solr.install.dir}/example/exampledocs" fileName=".*pdf"
+            rootEntity="false">
+
+      <field column="file" name="id"/>
+
+      <entity name="pdf" processor="TikaEntityProcessor"
+              url="${file.fileAbsolutePath}" format="text">
+
+        <field column="Author" name="author" meta="true"/>
+        <!-- in the original PDF, the Author meta-field name is upper-cased,
+          but in Solr schema it is lower-cased
+         -->
+
+        <field column="title" name="title" meta="true"/>
+        <field column="dc:format" name="format" meta="true"/>
+
+        <field column="text" name="text"/>
+
+      </entity>
+    </entity>
+  </document>
 </dataConfig>


[35/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-10399: Generalize some internal facet logic to simplify points/non-points field handling

Posted by ab...@apache.org.
SOLR-10399: Generalize some internal facet logic to simplify points/non-points field handling


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/b60b86ec
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/b60b86ec
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/b60b86ec

Branch: refs/heads/jira/solr-9959
Commit: b60b86ecab797396ab6bd8be82740191922b0aa2
Parents: 4135556
Author: Chris Hostetter <ho...@apache.org>
Authored: Fri Mar 31 17:01:42 2017 -0700
Committer: Chris Hostetter <ho...@apache.org>
Committed: Fri Mar 31 17:01:42 2017 -0700

----------------------------------------------------------------------
 solr/CHANGES.txt                                      |  2 ++
 .../java/org/apache/solr/request/SimpleFacets.java    | 14 +++-----------
 2 files changed, 5 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b60b86ec/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 097ee2b..1efefd0 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -178,6 +178,8 @@ Other Changes
 
 * SOLR-10338: Configure SecureRandom non blocking for tests. (Mihaly Toth, hossman, Ishan Chattopadhyaya, via Mark Miller)
 
+* SOLR-10399: Generalize some internal facet logic to simplify points/non-points field handling (Adrien Grand, hossman)
+
 ==================  6.5.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b60b86ec/solr/core/src/java/org/apache/solr/request/SimpleFacets.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/request/SimpleFacets.java b/solr/core/src/java/org/apache/solr/request/SimpleFacets.java
index b452802..8972121 100644
--- a/solr/core/src/java/org/apache/solr/request/SimpleFacets.java
+++ b/solr/core/src/java/org/apache/solr/request/SimpleFacets.java
@@ -851,17 +851,9 @@ public class SimpleFacets {
     SchemaField sf = searcher.getSchema().getField(field);
     FieldType ft = sf.getType();
     NamedList<Integer> res = new NamedList<>();
-    if (ft.isPointField()) {
-      for (String term : terms) {
-        int count = searcher.numDocs(ft.getFieldQuery(null, sf, term), parsed.docs);
-        res.add(term, count);
-      }
-    } else {
-      for (String term : terms) {
-        String internal = ft.toInternal(term);
-        int count = searcher.numDocs(new TermQuery(new Term(field, internal)), parsed.docs);
-        res.add(term, count);
-      }
+    for (String term : terms) {
+      int count = searcher.numDocs(ft.getFieldQuery(null, sf, term), parsed.docs);
+      res.add(term, count);
     }
     return res;    
   }


[39/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-7383: Replace DIH 'rss' example with 'atom' rss example was broken for multiple reasons. atom example showcases the same - and more - features and uses the smallest config file needed to make it work.

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/managed-schema
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/managed-schema b/solr/example/example-DIH/solr/rss/conf/managed-schema
deleted file mode 100644
index 9417902..0000000
--- a/solr/example/example-DIH/solr/rss/conf/managed-schema
+++ /dev/null
@@ -1,1096 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" ?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<!--  
- This is the Solr schema file. This file should be named "schema.xml" and
- should be in the conf directory under the solr home
- (i.e. ./solr/conf/schema.xml by default) 
- or located where the classloader for the Solr webapp can find it.
-
- This example schema is the recommended starting point for users.
- It should be kept correct and concise, usable out-of-the-box.
-
- For more information, on how to customize this file, please see
- http://wiki.apache.org/solr/SchemaXml
-
- PERFORMANCE NOTE: this schema includes many optional features and should not
- be used for benchmarking.  To improve performance one could
-  - set stored="false" for all fields possible (esp large fields) when you
-    only need to search on the field but don't need to return the original
-    value.
-  - set indexed="false" if you don't need to search on the field, but only
-    return the field as a result of searching on other indexed fields.
-  - remove all unneeded copyField statements
-  - for best index size and searching performance, set "index" to false
-    for all general text fields, use copyField to copy them to the
-    catchall "text" field, and use that for searching.
-  - For maximum indexing performance, use the ConcurrentUpdateSolrServer
-    java client.
-  - Remember to run the JVM in server mode, and use a higher logging level
-    that avoids logging every request
--->
-
-<schema name="example-DIH-rss" version="1.6">
-  <!-- attribute "name" is the name of this schema and is only used for display purposes.
-       version="x.y" is Solr's version number for the schema syntax and 
-       semantics.  It should not normally be changed by applications.
-
-       1.0: multiValued attribute did not exist, all fields are multiValued 
-            by nature
-       1.1: multiValued attribute introduced, false by default 
-       1.2: omitTermFreqAndPositions attribute introduced, true by default 
-            except for text fields.
-       1.3: removed optional field compress feature
-       1.4: autoGeneratePhraseQueries attribute introduced to drive QueryParser
-            behavior when a single string produces multiple tokens.  Defaults 
-            to off for version >= 1.4
-       1.5: omitNorms defaults to true for primitive field types 
-            (int, float, boolean, string...)
-       1.6: useDocValuesAsStored defaults to true.            
-     -->
-
-
-   <!-- Valid attributes for fields:
-     name: mandatory - the name for the field
-     type: mandatory - the name of a field type from the 
-       fieldTypes
-     indexed: true if this field should be indexed (searchable or sortable)
-     stored: true if this field should be retrievable
-     docValues: true if this field should have doc values. Doc values are
-       useful for faceting, grouping, sorting and function queries. Although not
-       required, doc values will make the index faster to load, more
-       NRT-friendly and more memory-efficient. They however come with some
-       limitations: they are currently only supported by StrField, UUIDField
-       and all Trie*Fields, and depending on the field type, they might
-       require the field to be single-valued, be required or have a default
-       value (check the documentation of the field type you're interested in
-       for more information)
-     multiValued: true if this field may contain multiple values per document
-     omitNorms: (expert) set to true to omit the norms associated with
-       this field (this disables length normalization and index-time
-       boosting for the field, and saves some memory).  Only full-text
-       fields or fields that need an index-time boost need norms.
-       Norms are omitted for primitive (non-analyzed) types by default.
-     termVectors: [false] set to true to store the term vector for a
-       given field.
-       When using MoreLikeThis, fields used for similarity should be
-       stored for best performance.
-     termPositions: Store position information with the term vector.  
-       This will increase storage costs.
-     termOffsets: Store offset information with the term vector. This 
-       will increase storage costs.
-     required: The field is required.  It will throw an error if the
-       value does not exist
-     default: a value that should be used if no value is specified
-       when adding a document.
-   -->
-
-   <!-- field names should consist of alphanumeric or underscore characters only and
-      not start with a digit.  This is not currently strictly enforced,
-      but other field names will not have first class support from all components
-      and back compatibility is not guaranteed.  Names with both leading and
-      trailing underscores (e.g. _version_) are reserved.
-   -->
-
-   <!-- If you remove this field, you must _also_ disable the update log in solrconfig.xml
-      or Solr won't start. _version_ and update log are required for SolrCloud
-   --> 
-   <field name="_version_" type="long" indexed="true" stored="true"/>
-   
-   <field name="subject" type="text_general" indexed="true" stored="true"/>
-
-   <!-- catchall field, containing all other searchable text fields (implemented
-        via copyField further on in this schema  -->
-   <field name="text" type="text_general" indexed="true" stored="false" multiValued="true"/>
-
-   <field name="source" type="text_general" indexed="true" stored="true" />
-   <field name="source-link" type="string" indexed="false" stored="true" />
-
-   <field name="title" type="text_general" indexed="true" stored="true" />
-   <field name="description" type="html" indexed="true" stored="true" />
-
-   <field name="link" type="string" indexed="true" stored="true" required="true" multiValued="false" />
-   <field name="creator" type="string" indexed="false" stored="true" />
-   <field name="item-subject" type="string" indexed="true" stored="false" />
-   <field name="date" type="date" indexed="true" stored="false" />
-   <field name="slash-department" type="string" indexed="true" stored="false" />
-   <field name="slash-section" type="string" indexed="true" stored="false" />
-   <field name="slash-comments" type="int" indexed="true" stored="true" />
-
-   <!-- Dynamic field definitions allow using convention over configuration
-       for fields via the specification of patterns to match field names. 
-       EXAMPLE:  name="*_i" will match any field ending in _i (like myid_i, z_i)
-       RESTRICTION: the glob-like pattern in the name attribute must have
-       a "*" only at the start or the end.  -->
-   
-   <dynamicField name="*_i"  type="int"    indexed="true"  stored="true"/>
-   <dynamicField name="*_is" type="int"    indexed="true"  stored="true"  multiValued="true"/>
-   <dynamicField name="*_s"  type="string"  indexed="true"  stored="true" />
-   <dynamicField name="*_ss" type="string"  indexed="true"  stored="true" multiValued="true"/>
-   <dynamicField name="*_l"  type="long"   indexed="true"  stored="true"/>
-   <dynamicField name="*_ls" type="long"   indexed="true"  stored="true"  multiValued="true"/>
-   <dynamicField name="*_t"  type="text_general"    indexed="true"  stored="true"/>
-   <dynamicField name="*_txt" type="text_general"   indexed="true"  stored="true" multiValued="true"/>
-   <dynamicField name="*_en"  type="text_en"    indexed="true"  stored="true" multiValued="true"/>
-   <dynamicField name="*_b"  type="boolean" indexed="true" stored="true"/>
-   <dynamicField name="*_bs" type="boolean" indexed="true" stored="true"  multiValued="true"/>
-   <dynamicField name="*_f"  type="float"  indexed="true"  stored="true"/>
-   <dynamicField name="*_fs" type="float"  indexed="true"  stored="true"  multiValued="true"/>
-   <dynamicField name="*_d"  type="double" indexed="true"  stored="true"/>
-   <dynamicField name="*_ds" type="double" indexed="true"  stored="true"  multiValued="true"/>
-
-   <!-- Type used to index the lat and lon components for the "location" FieldType -->
-   <dynamicField name="*_coordinate"  type="tdouble" indexed="true"  stored="false" />
-
-   <dynamicField name="*_dt"  type="date"    indexed="true"  stored="true"/>
-   <dynamicField name="*_dts" type="date"    indexed="true"  stored="true" multiValued="true"/>
-   <dynamicField name="*_p"  type="location" indexed="true" stored="true"/>
-
-   <!-- some trie-coded dynamic fields for faster range queries -->
-   <dynamicField name="*_ti" type="tint"    indexed="true"  stored="true"/>
-   <dynamicField name="*_tl" type="tlong"   indexed="true"  stored="true"/>
-   <dynamicField name="*_tf" type="tfloat"  indexed="true"  stored="true"/>
-   <dynamicField name="*_td" type="tdouble" indexed="true"  stored="true"/>
-   <dynamicField name="*_tdt" type="tdate"  indexed="true"  stored="true"/>
-
-   <dynamicField name="*_c"   type="currency" indexed="true"  stored="true"/>
-
-   <dynamicField name="ignored_*" type="ignored" multiValued="true"/>
-   <dynamicField name="attr_*" type="text_general" indexed="true" stored="true" multiValued="true"/>
-
-   <dynamicField name="random_*" type="random" />
-
-   <!-- uncomment the following to ignore any fields that don't already match an existing 
-        field name or dynamic field, rather than reporting them as an error. 
-        alternately, change the type="ignored" to some other type e.g. "text" if you want 
-        unknown fields indexed and/or stored by default --> 
-   <!--dynamicField name="*" type="ignored" multiValued="true" /-->
-   
-
-
-
- <!-- Field to use to determine and enforce document uniqueness. 
-      Unless this field is marked with required="false", it will be a required field
-   -->
- <uniqueKey>link</uniqueKey>
-
- <!-- DEPRECATED: The defaultSearchField is consulted by various query parsers when
-  parsing a query string that isn't explicit about the field.  Machine (non-user)
-  generated queries are best made explicit, or they can use the "df" request parameter
-  which takes precedence over this.
-  Note: Un-commenting defaultSearchField will be insufficient if your request handler
-  in solrconfig.xml defines "df", which takes precedence. That would need to be removed.
- <defaultSearchField>text</defaultSearchField> -->
-
- <!-- DEPRECATED: The defaultOperator (AND|OR) is consulted by various query parsers
-  when parsing a query string to determine if a clause of the query should be marked as
-  required or optional, assuming the clause isn't already marked by some operator.
-  The default is OR, which is generally assumed so it is not a good idea to change it
-  globally here.  The "q.op" request parameter takes precedence over this.
- <solrQueryParser defaultOperator="OR"/> -->
-
-  <!-- copyField commands copy one field to another at the time a document
-        is added to the index.  It's used either to index the same field differently,
-        or to add multiple fields to the same field for easier/faster searching.  -->
-
-    <copyField source="source" dest="text"/>
-    <copyField source="subject" dest="text"/>
-    <copyField source="title" dest="text"/>
-    <copyField source="description" dest="text"/>
-    <copyField source="creator" dest="text"/>
-    <copyField source="item-subject" dest="text"/>
-
-   <!-- Above, multiple source fields are copied to the [text] field. 
-    Another way to map multiple source fields to the same 
-    destination field is to use the dynamic field syntax. 
-    copyField also supports a maxChars to copy setting.  -->
-     
-   <!-- <copyField source="*_t" dest="text" maxChars="3000"/> -->
-
-   <!-- copy name to alphaNameSort, a field designed for sorting by name -->
-   <!-- <copyField source="name" dest="alphaNameSort"/> -->
- 
-  
-    <!-- field type definitions. The "name" attribute is
-       just a label to be used by field definitions.  The "class"
-       attribute and any other attributes determine the real
-       behavior of the fieldType.
-         Class names starting with "solr" refer to java classes in a
-       standard package such as org.apache.solr.analysis
-    -->
-
-    <fieldType name="html" stored="true" indexed="true" class="solr.TextField">
-      <analyzer type="index">
-        <charFilter class="solr.HTMLStripCharFilterFactory"/>
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <!-- in this example, we will only use synonyms at query time
-        <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
-        -->
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
-        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
-        <filter class="solr.PorterStemFilterFactory"/>
-        <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
-        <filter class="solr.FlattenGraphFilterFactory" />
-      </analyzer>
-      <analyzer type="query">
-        <charFilter class="solr.HTMLStripCharFilterFactory"/>
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
-        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
-        <filter class="solr.PorterStemFilterFactory"/>
-        <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
-      </analyzer>
-    </fieldType>
-
-    <!-- The StrField type is not analyzed, but indexed/stored verbatim.
-       It supports doc values but in that case the field needs to be
-       single-valued and either required or have a default value.
-      -->
-    <fieldType name="string" class="solr.StrField" sortMissingLast="true" />
-
-    <!-- boolean type: "true" or "false" -->
-    <fieldType name="boolean" class="solr.BoolField" sortMissingLast="true"/>
-
-    <!-- sortMissingLast and sortMissingFirst attributes are optional attributes that are
-         currently supported on types that are sorted internally as strings
-         and on numeric types.
-       This includes "string","boolean", and, as of 3.5 (and 4.x),
-       int, float, long, date, double, including the "Trie" variants.
-       - If sortMissingLast="true", then a sort on this field will cause documents
-         without the field to come after documents with the field,
-         regardless of the requested sort order (asc or desc).
-       - If sortMissingFirst="true", then a sort on this field will cause documents
-         without the field to come before documents with the field,
-         regardless of the requested sort order.
-       - If sortMissingLast="false" and sortMissingFirst="false" (the default),
-         then default lucene sorting will be used which places docs without the
-         field first in an ascending sort and last in a descending sort.
-    -->    
-
-    <!--
-      Default numeric field types. For faster range queries, consider the tint/tfloat/tlong/tdouble types.
-
-      These fields support doc values, but they require the field to be
-      single-valued and either be required or have a default value.
-    -->
-    <fieldType name="int" class="solr.TrieIntField" precisionStep="0" positionIncrementGap="0"/>
-    <fieldType name="float" class="solr.TrieFloatField" precisionStep="0" positionIncrementGap="0"/>
-    <fieldType name="long" class="solr.TrieLongField" precisionStep="0" positionIncrementGap="0"/>
-    <fieldType name="double" class="solr.TrieDoubleField" precisionStep="0" positionIncrementGap="0"/>
-
-    <!--
-     Numeric field types that index each value at various levels of precision
-     to accelerate range queries when the number of values between the range
-     endpoints is large. See the javadoc for NumericRangeQuery for internal
-     implementation details.
-
-     Smaller precisionStep values (specified in bits) will lead to more tokens
-     indexed per value, slightly larger index size, and faster range queries.
-     A precisionStep of 0 disables indexing at different precision levels.
-    -->
-    <fieldType name="tint" class="solr.TrieIntField" precisionStep="8" positionIncrementGap="0"/>
-    <fieldType name="tfloat" class="solr.TrieFloatField" precisionStep="8" positionIncrementGap="0"/>
-    <fieldType name="tlong" class="solr.TrieLongField" precisionStep="8" positionIncrementGap="0"/>
-    <fieldType name="tdouble" class="solr.TrieDoubleField" precisionStep="8" positionIncrementGap="0"/>
-
-    <!-- The format for this date field is of the form 1995-12-31T23:59:59Z, and
-         is a more restricted form of the canonical representation of dateTime
-         http://www.w3.org/TR/xmlschema-2/#dateTime    
-         The trailing "Z" designates UTC time and is mandatory.
-         Optional fractional seconds are allowed: 1995-12-31T23:59:59.999Z
-         All other components are mandatory.
-
-         Expressions can also be used to denote calculations that should be
-         performed relative to "NOW" to determine the value, ie...
-
-               NOW/HOUR
-                  ... Round to the start of the current hour
-               NOW-1DAY
-                  ... Exactly 1 day prior to now
-               NOW/DAY+6MONTHS+3DAYS
-                  ... 6 months and 3 days in the future from the start of
-                      the current day
-                      
-         Consult the TrieDateField javadocs for more information.
-
-         Note: For faster range queries, consider the tdate type
-      -->
-    <fieldType name="date" class="solr.TrieDateField" precisionStep="0" positionIncrementGap="0"/>
-
-    <!-- A Trie based date field for faster date range queries and date faceting. -->
-    <fieldType name="tdate" class="solr.TrieDateField" precisionStep="6" positionIncrementGap="0"/>
-
-
-    <!--Binary data type. The data should be sent/retrieved in as Base64 encoded Strings -->
-    <fieldType name="binary" class="solr.BinaryField"/>
-
-    <!-- The "RandomSortField" is not used to store or search any
-         data.  You can declare fields of this type in your schema
-         to generate pseudo-random orderings of your docs for sorting 
-         or function purposes.  The ordering is generated based on the field
-         name and the version of the index. As long as the index version
-         remains unchanged, and the same field name is reused,
-         the ordering of the docs will be consistent.  
-         If you want different pseudo-random orderings of documents,
-         for the same version of the index, use a dynamicField and
-         change the field name in the request.
-     -->
-    <fieldType name="random" class="solr.RandomSortField" indexed="true" />
-
-    <!-- solr.TextField allows the specification of custom text analyzers
-         specified as a tokenizer and a list of token filters. Different
-         analyzers may be specified for indexing and querying.
-
-         The optional positionIncrementGap puts space between multiple fields of
-         this type on the same document, with the purpose of preventing false phrase
-         matching across fields.
-
-         For more info on customizing your analyzer chain, please see
-         http://wiki.apache.org/solr/AnalyzersTokenizersTokenFilters
-     -->
-
-    <!-- One can also specify an existing Analyzer class that has a
-         default constructor via the class attribute on the analyzer element.
-         Example:
-    <fieldType name="text_greek" class="solr.TextField">
-      <analyzer class="org.apache.lucene.analysis.el.GreekAnalyzer"/>
-    </fieldType>
-    -->
-
-    <!-- A text field that only splits on whitespace for exact matching of words -->
-    <fieldType name="text_ws" class="solr.TextField" positionIncrementGap="100">
-      <analyzer>
-        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-      </analyzer>
-    </fieldType>
-
-    <!-- A general text field that has reasonable, generic
-         cross-language defaults: it tokenizes with StandardTokenizer,
-   removes stop words from case-insensitive "stopwords.txt"
-   (empty by default), and down cases.  At query time only, it
-   also applies synonyms. -->
-    <fieldType name="text_general" class="solr.TextField" positionIncrementGap="100">
-      <analyzer type="index">
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
-        <!-- in this example, we will only use synonyms at query time
-        <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
-        <filter class="solr.FlattenGraphFilterFactory"/>
-        -->
-        <filter class="solr.LowerCaseFilterFactory"/>
-      </analyzer>
-      <analyzer type="query">
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
-        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-      </analyzer>
-    </fieldType>
-
-    <!-- A text field with defaults appropriate for English: it
-         tokenizes with StandardTokenizer, removes English stop words
-         (lang/stopwords_en.txt), down cases, protects words from protwords.txt, and
-         finally applies Porter's stemming.  The query time analyzer
-         also applies synonyms from synonyms.txt. -->
-    <fieldType name="text_en" class="solr.TextField" positionIncrementGap="100">
-      <analyzer type="index">
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <!-- in this example, we will only use synonyms at query time
-        <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
-        <filter class="solr.FlattenGraphFilterFactory"/>
-        -->
-        <!-- Case insensitive stop word removal.
-        -->
-        <filter class="solr.StopFilterFactory"
-                ignoreCase="true"
-                words="lang/stopwords_en.txt"
-                />
-        <filter class="solr.LowerCaseFilterFactory"/>
-  <filter class="solr.EnglishPossessiveFilterFactory"/>
-        <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
-  <!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
-        <filter class="solr.EnglishMinimalStemFilterFactory"/>
-  -->
-        <filter class="solr.PorterStemFilterFactory"/>
-      </analyzer>
-      <analyzer type="query">
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
-        <filter class="solr.StopFilterFactory"
-                ignoreCase="true"
-                words="lang/stopwords_en.txt"
-                />
-        <filter class="solr.LowerCaseFilterFactory"/>
-  <filter class="solr.EnglishPossessiveFilterFactory"/>
-        <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
-  <!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
-        <filter class="solr.EnglishMinimalStemFilterFactory"/>
-  -->
-        <filter class="solr.PorterStemFilterFactory"/>
-      </analyzer>
-    </fieldType>
-
-    <!-- A text field with defaults appropriate for English, plus
-   aggressive word-splitting and autophrase features enabled.
-   This field is just like text_en, except it adds
-   WordDelimiterGraphFilter to enable splitting and matching of
-   words on case-change, alpha numeric boundaries, and
-   non-alphanumeric chars.  This means certain compound word
-   cases will work, for example query "wi fi" will match
-   document "WiFi" or "wi-fi".
-        -->
-    <fieldType name="text_en_splitting" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
-      <analyzer type="index">
-        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <!-- in this example, we will only use synonyms at query time
-        <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
-        -->
-        <!-- Case insensitive stop word removal.
-        -->
-        <filter class="solr.StopFilterFactory"
-                ignoreCase="true"
-                words="lang/stopwords_en.txt"
-                />
-        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
-        <filter class="solr.PorterStemFilterFactory"/>
-        <filter class="solr.FlattenGraphFilterFactory" />
-      </analyzer>
-      <analyzer type="query">
-        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
-        <filter class="solr.StopFilterFactory"
-                ignoreCase="true"
-                words="lang/stopwords_en.txt"
-                />
-        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
-        <filter class="solr.PorterStemFilterFactory"/>
-      </analyzer>
-    </fieldType>
-
-    <!-- Less flexible matching, but less false matches.  Probably not ideal for product names,
-         but may be good for SKUs.  Can insert dashes in the wrong place and still match. -->
-    <fieldType name="text_en_splitting_tight" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
-      <analyzer type="index">
-        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
-        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
-        <filter class="solr.EnglishMinimalStemFilterFactory"/>
-        <!-- this filter can remove any duplicate tokens that appear at the same position - sometimes
-             possible with WordDelimiterGraphFilter in conjunction with stemming. -->
-        <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
-        <filter class="solr.FlattenGraphFilterFactory" />
-      </analyzer>
-      <analyzer type="query">
-        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
-        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
-        <filter class="solr.EnglishMinimalStemFilterFactory"/>
-        <!-- this filter can remove any duplicate tokens that appear at the same position - sometimes
-             possible with WordDelimiterGraphFilter in conjunction with stemming. -->
-        <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
-      </analyzer>
-    </fieldType>
-
-    <!-- Just like text_general except it reverses the characters of
-   each token, to enable more efficient leading wildcard queries. -->
-    <fieldType name="text_general_rev" class="solr.TextField" positionIncrementGap="100">
-      <analyzer type="index">
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.ReversedWildcardFilterFactory" withOriginal="true"
-           maxPosAsterisk="3" maxPosQuestion="2" maxFractionAsterisk="0.33"/>
-      </analyzer>
-      <analyzer type="query">
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
-        <filter class="solr.LowerCaseFilterFactory"/>
-      </analyzer>
-    </fieldType>
-
-    <!-- charFilter + WhitespaceTokenizer  -->
-    <!--
-    <fieldType name="text_char_norm" class="solr.TextField" positionIncrementGap="100" >
-      <analyzer>
-        <charFilter class="solr.MappingCharFilterFactory" mapping="mapping-ISOLatin1Accent.txt"/>
-        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-      </analyzer>
-    </fieldType>
-    -->
-
-    <!-- This is an example of using the KeywordTokenizer along
-         With various TokenFilterFactories to produce a sortable field
-         that does not include some properties of the source text
-      -->
-    <fieldType name="alphaOnlySort" class="solr.TextField" sortMissingLast="true" omitNorms="true">
-      <analyzer>
-        <!-- KeywordTokenizer does no actual tokenizing, so the entire
-             input string is preserved as a single token
-          -->
-        <tokenizer class="solr.KeywordTokenizerFactory"/>
-        <!-- The LowerCase TokenFilter does what you expect, which can be
-             useful when you want your sorting to be case insensitive
-          -->
-        <filter class="solr.LowerCaseFilterFactory" />
-        <!-- The TrimFilter removes any leading or trailing whitespace -->
-        <filter class="solr.TrimFilterFactory" />
-        <!-- The PatternReplaceFilter gives you the flexibility to use
-             Java Regular expression to replace any sequence of characters
-             matching a pattern with an arbitrary replacement string, 
-             which may include back references to portions of the original
-             string matched by the pattern.
-             
-             See the Java Regular Expression documentation for more
-             information on pattern and replacement string syntax.
-             
-             http://docs.oracle.com/javase/8/docs/api/java/util/regex/package-summary.html
-          -->
-        <filter class="solr.PatternReplaceFilterFactory"
-                pattern="([^a-z])" replacement="" replace="all"
-        />
-      </analyzer>
-    </fieldType>
-    
-    <fieldType name="phonetic" stored="false" indexed="true" class="solr.TextField" >
-      <analyzer>
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.DoubleMetaphoneFilterFactory" inject="false"/>
-      </analyzer>
-    </fieldType>
-
-    <fieldType name="payloads" stored="false" indexed="true" class="solr.TextField" >
-      <analyzer>
-        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <!--
-        The DelimitedPayloadTokenFilter can put payloads on tokens... for example,
-        a token of "foo|1.4"  would be indexed as "foo" with a payload of 1.4f
-        Attributes of the DelimitedPayloadTokenFilterFactory : 
-         "delimiter" - a one character delimiter. Default is | (pipe)
-   "encoder" - how to encode the following value into a payload
-      float -> org.apache.lucene.analysis.payloads.FloatEncoder,
-      integer -> o.a.l.a.p.IntegerEncoder
-      identity -> o.a.l.a.p.IdentityEncoder
-            Fully Qualified class name implementing PayloadEncoder, Encoder must have a no arg constructor.
-         -->
-        <filter class="solr.DelimitedPayloadTokenFilterFactory" encoder="float"/>
-      </analyzer>
-    </fieldType>
-
-    <!-- lowercases the entire field value, keeping it as a single token.  -->
-    <fieldType name="lowercase" class="solr.TextField" positionIncrementGap="100">
-      <analyzer>
-        <tokenizer class="solr.KeywordTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory" />
-      </analyzer>
-    </fieldType>
-
-    <!-- 
-      Example of using PathHierarchyTokenizerFactory at index time, so
-      queries for paths match documents at that path, or in descendant paths
-    -->
-    <fieldType name="descendent_path" class="solr.TextField">
-      <analyzer type="index">
-  <tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
-      </analyzer>
-      <analyzer type="query">
-  <tokenizer class="solr.KeywordTokenizerFactory" />
-      </analyzer>
-    </fieldType>
-    <!-- 
-      Example of using PathHierarchyTokenizerFactory at query time, so
-      queries for paths match documents at that path, or in ancestor paths
-    -->
-    <fieldType name="ancestor_path" class="solr.TextField">
-      <analyzer type="index">
-  <tokenizer class="solr.KeywordTokenizerFactory" />
-      </analyzer>
-      <analyzer type="query">
-  <tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
-      </analyzer>
-    </fieldType>
-
-    <!-- since fields of this type are by default not stored or indexed,
-         any data added to them will be ignored outright.  --> 
-    <fieldType name="ignored" stored="false" indexed="false" multiValued="true" class="solr.StrField" />
-
-    <!-- This point type indexes the coordinates as separate fields (subFields)
-      If subFieldType is defined, it references a type, and a dynamic field
-      definition is created matching *___<typename>.  Alternately, if 
-      subFieldSuffix is defined, that is used to create the subFields.
-      Example: if subFieldType="double", then the coordinates would be
-        indexed in fields myloc_0___double,myloc_1___double.
-      Example: if subFieldSuffix="_d" then the coordinates would be indexed
-        in fields myloc_0_d,myloc_1_d
-      The subFields are an implementation detail of the fieldType, and end
-      users normally should not need to know about them.
-     -->
-    <fieldType name="point" class="solr.PointType" dimension="2" subFieldSuffix="_d"/>
-
-    <!-- A specialized field for geospatial search. If indexed, this fieldType must not be multivalued. -->
-    <fieldType name="location" class="solr.LatLonType" subFieldSuffix="_coordinate"/>
-
-    <!-- An alternative geospatial field type new to Solr 4.  It supports multiValued and polygon shapes.
-      For more information about this and other Spatial fields new to Solr 4, see:
-      http://wiki.apache.org/solr/SolrAdaptersForLuceneSpatial4
-    -->
-    <fieldType name="location_rpt" class="solr.SpatialRecursivePrefixTreeFieldType"
-        geo="true" distErrPct="0.025" maxDistErr="0.001" distanceUnits="kilometers" />
-
-   <!-- Money/currency field type. See http://wiki.apache.org/solr/MoneyFieldType
-        Parameters:
-          defaultCurrency: Specifies the default currency if none specified. Defaults to "USD"
-          precisionStep:   Specifies the precisionStep for the TrieLong field used for the amount
-          providerClass:   Lets you plug in other exchange provider backend:
-                           solr.FileExchangeRateProvider is the default and takes one parameter:
-                             currencyConfig: name of an xml file holding exchange rates
-                           solr.OpenExchangeRatesOrgProvider uses rates from openexchangerates.org:
-                             ratesFileLocation: URL or path to rates JSON file (default latest.json on the web)
-                             refreshInterval: Number of minutes between each rates fetch (default: 1440, min: 60)
-   -->
-    <fieldType name="currency" class="solr.CurrencyField" precisionStep="8" defaultCurrency="USD" currencyConfig="currency.xml" />
-             
-
-
-   <!-- some examples for different languages (generally ordered by ISO code) -->
-
-    <!-- Arabic -->
-    <fieldType name="text_ar" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <!-- for any non-arabic -->
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_ar.txt" />
-        <!-- normalizes \ufeef to \ufef1, etc -->
-        <filter class="solr.ArabicNormalizationFilterFactory"/>
-        <filter class="solr.ArabicStemFilterFactory"/>
-      </analyzer>
-    </fieldType>
-
-    <!-- Bulgarian -->
-    <fieldType name="text_bg" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/> 
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_bg.txt" /> 
-        <filter class="solr.BulgarianStemFilterFactory"/>       
-      </analyzer>
-    </fieldType>
-    
-    <!-- Catalan -->
-    <fieldType name="text_ca" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <!-- removes l', etc -->
-        <filter class="solr.ElisionFilterFactory" ignoreCase="true" articles="lang/contractions_ca.txt"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_ca.txt" />
-        <filter class="solr.SnowballPorterFilterFactory" language="Catalan"/>       
-      </analyzer>
-    </fieldType>
-    
-    <!-- CJK bigram (see text_ja for a Japanese configuration using morphological analysis) -->
-    <fieldType name="text_cjk" class="solr.TextField" positionIncrementGap="100">
-      <analyzer>
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <!-- normalize width before bigram, as e.g. half-width dakuten combine  -->
-        <filter class="solr.CJKWidthFilterFactory"/>
-        <!-- for any non-CJK -->
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.CJKBigramFilterFactory"/>
-      </analyzer>
-    </fieldType>
-
-    <!-- Kurdish -->
-    <fieldType name="text_ckb" class="solr.TextField" positionIncrementGap="100">
-      <analyzer>
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.SoraniNormalizationFilterFactory"/>
-        <!-- for any latin text -->
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_ckb.txt"/>
-        <filter class="solr.SoraniStemFilterFactory"/>
-      </analyzer>
-    </fieldType>
-
-    <!-- Czech -->
-    <fieldType name="text_cz" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_cz.txt" />
-        <filter class="solr.CzechStemFilterFactory"/>       
-      </analyzer>
-    </fieldType>
-    
-    <!-- Danish -->
-    <fieldType name="text_da" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_da.txt" format="snowball" />
-        <filter class="solr.SnowballPorterFilterFactory" language="Danish"/>       
-      </analyzer>
-    </fieldType>
-    
-    <!-- German -->
-    <fieldType name="text_de" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_de.txt" format="snowball" />
-        <filter class="solr.GermanNormalizationFilterFactory"/>
-        <filter class="solr.GermanLightStemFilterFactory"/>
-        <!-- less aggressive: <filter class="solr.GermanMinimalStemFilterFactory"/> -->
-        <!-- more aggressive: <filter class="solr.SnowballPorterFilterFactory" language="German2"/> -->
-      </analyzer>
-    </fieldType>
-    
-    <!-- Greek -->
-    <fieldType name="text_el" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <!-- greek specific lowercase for sigma -->
-        <filter class="solr.GreekLowerCaseFilterFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="false" words="lang/stopwords_el.txt" />
-        <filter class="solr.GreekStemFilterFactory"/>
-      </analyzer>
-    </fieldType>
-    
-    <!-- Spanish -->
-    <fieldType name="text_es" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_es.txt" format="snowball" />
-        <filter class="solr.SpanishLightStemFilterFactory"/>
-        <!-- more aggressive: <filter class="solr.SnowballPorterFilterFactory" language="Spanish"/> -->
-      </analyzer>
-    </fieldType>
-    
-    <!-- Basque -->
-    <fieldType name="text_eu" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_eu.txt" />
-        <filter class="solr.SnowballPorterFilterFactory" language="Basque"/>
-      </analyzer>
-    </fieldType>
-    
-    <!-- Persian -->
-    <fieldType name="text_fa" class="solr.TextField" positionIncrementGap="100">
-      <analyzer>
-        <!-- for ZWNJ -->
-        <charFilter class="solr.PersianCharFilterFactory"/>
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.ArabicNormalizationFilterFactory"/>
-        <filter class="solr.PersianNormalizationFilterFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_fa.txt" />
-      </analyzer>
-    </fieldType>
-    
-    <!-- Finnish -->
-    <fieldType name="text_fi" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_fi.txt" format="snowball" />
-        <filter class="solr.SnowballPorterFilterFactory" language="Finnish"/>
-        <!-- less aggressive: <filter class="solr.FinnishLightStemFilterFactory"/> -->
-      </analyzer>
-    </fieldType>
-    
-    <!-- French -->
-    <fieldType name="text_fr" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <!-- removes l', etc -->
-        <filter class="solr.ElisionFilterFactory" ignoreCase="true" articles="lang/contractions_fr.txt"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_fr.txt" format="snowball" />
-        <filter class="solr.FrenchLightStemFilterFactory"/>
-        <!-- less aggressive: <filter class="solr.FrenchMinimalStemFilterFactory"/> -->
-        <!-- more aggressive: <filter class="solr.SnowballPorterFilterFactory" language="French"/> -->
-      </analyzer>
-    </fieldType>
-    
-    <!-- Irish -->
-    <fieldType name="text_ga" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <!-- removes d', etc -->
-        <filter class="solr.ElisionFilterFactory" ignoreCase="true" articles="lang/contractions_ga.txt"/>
-        <!-- removes n-, etc. position increments is intentionally false! -->
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/hyphenations_ga.txt"/>
-        <filter class="solr.IrishLowerCaseFilterFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_ga.txt"/>
-        <filter class="solr.SnowballPorterFilterFactory" language="Irish"/>
-      </analyzer>
-    </fieldType>
-    
-    <!-- Galician -->
-    <fieldType name="text_gl" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_gl.txt" />
-        <filter class="solr.GalicianStemFilterFactory"/>
-        <!-- less aggressive: <filter class="solr.GalicianMinimalStemFilterFactory"/> -->
-      </analyzer>
-    </fieldType>
-    
-    <!-- Hindi -->
-    <fieldType name="text_hi" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <!-- normalizes unicode representation -->
-        <filter class="solr.IndicNormalizationFilterFactory"/>
-        <!-- normalizes variation in spelling -->
-        <filter class="solr.HindiNormalizationFilterFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_hi.txt" />
-        <filter class="solr.HindiStemFilterFactory"/>
-      </analyzer>
-    </fieldType>
-    
-    <!-- Hungarian -->
-    <fieldType name="text_hu" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_hu.txt" format="snowball" />
-        <filter class="solr.SnowballPorterFilterFactory" language="Hungarian"/>
-        <!-- less aggressive: <filter class="solr.HungarianLightStemFilterFactory"/> -->   
-      </analyzer>
-    </fieldType>
-    
-    <!-- Armenian -->
-    <fieldType name="text_hy" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_hy.txt" />
-        <filter class="solr.SnowballPorterFilterFactory" language="Armenian"/>
-      </analyzer>
-    </fieldType>
-    
-    <!-- Indonesian -->
-    <fieldType name="text_id" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_id.txt" />
-        <!-- for a less aggressive approach (only inflectional suffixes), set stemDerivational to false -->
-        <filter class="solr.IndonesianStemFilterFactory" stemDerivational="true"/>
-      </analyzer>
-    </fieldType>
-    
-    <!-- Italian -->
-    <fieldType name="text_it" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <!-- removes l', etc -->
-        <filter class="solr.ElisionFilterFactory" ignoreCase="true" articles="lang/contractions_it.txt"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_it.txt" format="snowball" />
-        <filter class="solr.ItalianLightStemFilterFactory"/>
-        <!-- more aggressive: <filter class="solr.SnowballPorterFilterFactory" language="Italian"/> -->
-      </analyzer>
-    </fieldType>
-    
-    <!-- Japanese using morphological analysis (see text_cjk for a configuration using bigramming)
-
-         NOTE: If you want to optimize search for precision, use default operator AND in your query
-         parser config with <solrQueryParser defaultOperator="AND"/> further down in this file.  Use 
-         OR if you would like to optimize for recall (default).
-    -->
-    <fieldType name="text_ja" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="false">
-      <analyzer>
-      <!-- Kuromoji Japanese morphological analyzer/tokenizer (JapaneseTokenizer)
-
-           Kuromoji has a search mode (default) that does segmentation useful for search.  A heuristic
-           is used to segment compounds into its parts and the compound itself is kept as synonym.
-
-           Valid values for attribute mode are:
-              normal: regular segmentation
-              search: segmentation useful for search with synonyms compounds (default)
-            extended: same as search mode, but unigrams unknown words (experimental)
-
-           For some applications it might be good to use search mode for indexing and normal mode for
-           queries to reduce recall and prevent parts of compounds from being matched and highlighted.
-           Use <analyzer type="index"> and <analyzer type="query"> for this and mode normal in query.
-
-           Kuromoji also has a convenient user dictionary feature that allows overriding the statistical
-           model with your own entries for segmentation, part-of-speech tags and readings without a need
-           to specify weights.  Notice that user dictionaries have not been subject to extensive testing.
-
-           User dictionary attributes are:
-                     userDictionary: user dictionary filename
-             userDictionaryEncoding: user dictionary encoding (default is UTF-8)
-
-           See lang/userdict_ja.txt for a sample user dictionary file.
-
-           Punctuation characters are discarded by default.  Use discardPunctuation="false" to keep them.
-
-           See http://wiki.apache.org/solr/JapaneseLanguageSupport for more on Japanese language support.
-        -->
-        <tokenizer class="solr.JapaneseTokenizerFactory" mode="search"/>
-        <!--<tokenizer class="solr.JapaneseTokenizerFactory" mode="search" userDictionary="lang/userdict_ja.txt"/>-->
-        <!-- Reduces inflected verbs and adjectives to their base/dictionary forms (辞書形) -->
-        <filter class="solr.JapaneseBaseFormFilterFactory"/>
-        <!-- Removes tokens with certain part-of-speech tags -->
-        <filter class="solr.JapanesePartOfSpeechStopFilterFactory" tags="lang/stoptags_ja.txt" />
-        <!-- Normalizes full-width romaji to half-width and half-width kana to full-width (Unicode NFKC subset) -->
-        <filter class="solr.CJKWidthFilterFactory"/>
-        <!-- Removes common tokens typically not useful for search, but have a negative effect on ranking -->
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_ja.txt" />
-        <!-- Normalizes common katakana spelling variations by removing any last long sound character (U+30FC) -->
-        <filter class="solr.JapaneseKatakanaStemFilterFactory" minimumLength="4"/>
-        <!-- Lower-cases romaji characters -->
-        <filter class="solr.LowerCaseFilterFactory"/>
-      </analyzer>
-    </fieldType>
-    
-    <!-- Latvian -->
-    <fieldType name="text_lv" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_lv.txt" />
-        <filter class="solr.LatvianStemFilterFactory"/>
-      </analyzer>
-    </fieldType>
-    
-    <!-- Dutch -->
-    <fieldType name="text_nl" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_nl.txt" format="snowball" />
-        <filter class="solr.StemmerOverrideFilterFactory" dictionary="lang/stemdict_nl.txt" ignoreCase="false"/>
-        <filter class="solr.SnowballPorterFilterFactory" language="Dutch"/>
-      </analyzer>
-    </fieldType>
-    
-    <!-- Norwegian -->
-    <fieldType name="text_no" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_no.txt" format="snowball" />
-        <filter class="solr.SnowballPorterFilterFactory" language="Norwegian"/>
-        <!-- less aggressive: <filter class="solr.NorwegianLightStemFilterFactory" variant="nb"/> -->
-        <!-- singular/plural: <filter class="solr.NorwegianMinimalStemFilterFactory" variant="nb"/> -->
-        <!-- The "light" and "minimal" stemmers support variants: nb=Bokmål, nn=Nynorsk, no=Both -->
-      </analyzer>
-    </fieldType>
-    
-    <!-- Portuguese -->
-    <fieldType name="text_pt" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_pt.txt" format="snowball" />
-        <filter class="solr.PortugueseLightStemFilterFactory"/>
-        <!-- less aggressive: <filter class="solr.PortugueseMinimalStemFilterFactory"/> -->
-        <!-- more aggressive: <filter class="solr.SnowballPorterFilterFactory" language="Portuguese"/> -->
-        <!-- most aggressive: <filter class="solr.PortugueseStemFilterFactory"/> -->
-      </analyzer>
-    </fieldType>
-    
-    <!-- Romanian -->
-    <fieldType name="text_ro" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_ro.txt" />
-        <filter class="solr.SnowballPorterFilterFactory" language="Romanian"/>
-      </analyzer>
-    </fieldType>
-    
-    <!-- Russian -->
-    <fieldType name="text_ru" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_ru.txt" format="snowball" />
-        <filter class="solr.SnowballPorterFilterFactory" language="Russian"/>
-        <!-- less aggressive: <filter class="solr.RussianLightStemFilterFactory"/> -->
-      </analyzer>
-    </fieldType>
-    
-    <!-- Swedish -->
-    <fieldType name="text_sv" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_sv.txt" format="snowball" />
-        <filter class="solr.SnowballPorterFilterFactory" language="Swedish"/>
-        <!-- less aggressive: <filter class="solr.SwedishLightStemFilterFactory"/> -->
-      </analyzer>
-    </fieldType>
-    
-    <!-- Thai -->
-    <fieldType name="text_th" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.ThaiTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_th.txt" />
-      </analyzer>
-    </fieldType>
-    
-    <!-- Turkish -->
-    <fieldType name="text_tr" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.ApostropheFilterFactory"/>
-        <filter class="solr.TurkishLowerCaseFilterFactory"/>
-        <filter class="solr.StopFilterFactory" ignoreCase="false" words="lang/stopwords_tr.txt" />
-        <filter class="solr.SnowballPorterFilterFactory" language="Turkish"/>
-      </analyzer>
-    </fieldType>
-  
-  <!-- Similarity is the scoring routine for each document vs. a query.
-       A custom Similarity or SimilarityFactory may be specified here, but 
-       the default is fine for most applications.  
-       For more info: http://wiki.apache.org/solr/SchemaXml#Similarity
-    -->
-  <!--
-     <similarity class="com.example.solr.CustomSimilarityFactory">
-       <str name="paramkey">param value</str>
-     </similarity>
-    -->
-
-</schema>


[28/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-10338: Configure SecureRandom non blocking for tests.

Posted by ab...@apache.org.
SOLR-10338: Configure SecureRandom non blocking for tests.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/0445f820
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/0445f820
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/0445f820

Branch: refs/heads/jira/solr-9959
Commit: 0445f8200e0630e1bb8b7117f200529ed1259747
Parents: bdd0c7e
Author: markrmiller <ma...@apache.org>
Authored: Fri Mar 31 10:53:20 2017 -0400
Committer: markrmiller <ma...@apache.org>
Committed: Fri Mar 31 10:53:20 2017 -0400

----------------------------------------------------------------------
 dev-tools/idea/.idea/workspace.xml              |  2 +-
 dev-tools/maven/pom.xml.template                |  1 +
 lucene/common-build.xml                         |  3 ++
 solr/CHANGES.txt                                |  2 +
 .../test/SecureRandomAlgorithmTesterApp.java    | 41 ++++++++++++++++++++
 .../java/org/apache/solr/SolrTestCaseJ4.java    |  9 +++++
 6 files changed, 57 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0445f820/dev-tools/idea/.idea/workspace.xml
----------------------------------------------------------------------
diff --git a/dev-tools/idea/.idea/workspace.xml b/dev-tools/idea/.idea/workspace.xml
index 0ca7f0c..e22108f 100644
--- a/dev-tools/idea/.idea/workspace.xml
+++ b/dev-tools/idea/.idea/workspace.xml
@@ -2,7 +2,7 @@
 <project version="4">
   <component name="RunManager" selected="JUnit.Lucene core">
     <configuration default="true" type="JUnit" factoryName="JUnit">
-      <option name="VM_PARAMETERS" value="-ea" />
+      <option name="VM_PARAMETERS" value="-ea -Djava.security.egd=file:/dev/./urandom" />
     </configuration>
     <configuration default="false" name="Lucene core" type="JUnit" factoryName="JUnit">
       <module name="lucene-core-tests" />

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0445f820/dev-tools/maven/pom.xml.template
----------------------------------------------------------------------
diff --git a/dev-tools/maven/pom.xml.template b/dev-tools/maven/pom.xml.template
index cd8d6b8..6b7f915 100644
--- a/dev-tools/maven/pom.xml.template
+++ b/dev-tools/maven/pom.xml.template
@@ -277,6 +277,7 @@
               <tests.postingsformat>${tests.postingsformat}</tests.postingsformat>
               <tests.timezone>${tests.timezone}</tests.timezone>
               <tests.verbose>${tests.verbose}</tests.verbose>
+              <java.security.egd>file:/dev/./urandom</java.security.egd>
             </systemPropertyVariables>
           </configuration>
         </plugin>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0445f820/lucene/common-build.xml
----------------------------------------------------------------------
diff --git a/lucene/common-build.xml b/lucene/common-build.xml
index 327a01d..aee7899 100644
--- a/lucene/common-build.xml
+++ b/lucene/common-build.xml
@@ -1062,6 +1062,9 @@
 
             <sysproperty key="tests.src.home" value="${user.dir}" />
 
+            <!-- replaces default random source to the nonblocking variant -->
+            <sysproperty key="java.security.egd" value="file:/dev/./urandom"/>
+
             <!-- Only pass these to the test JVMs if defined in ANT. -->
             <syspropertyset>
                 <propertyref prefix="tests.maxfailures" />

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0445f820/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 840de48..097ee2b 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -176,6 +176,8 @@ Other Changes
 
 * SOLR-10147: Admin UI -> Cloud -> Graph: Impossible to see shard state (Amrit Sarkar, janhoy)
 
+* SOLR-10338: Configure SecureRandom non blocking for tests. (Mihaly Toth, hossman, Ishan Chattopadhyaya, via Mark Miller)
+
 ==================  6.5.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0445f820/solr/core/src/test/SecureRandomAlgorithmTesterApp.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/SecureRandomAlgorithmTesterApp.java b/solr/core/src/test/SecureRandomAlgorithmTesterApp.java
new file mode 100644
index 0000000..44f79e9
--- /dev/null
+++ b/solr/core/src/test/SecureRandomAlgorithmTesterApp.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.security.NoSuchAlgorithmException;
+import java.security.SecureRandom;
+
+public class SecureRandomAlgorithmTesterApp {
+  public static void main(String[] args) throws NoSuchAlgorithmException {
+    String algorithm = args[0];
+    String method = args[1];
+    int amount = Integer.valueOf(args[2]);
+    SecureRandom secureRandom;
+    if(algorithm.equals("default"))
+      secureRandom = new SecureRandom();
+    else 
+      secureRandom = SecureRandom.getInstance(algorithm);
+    System.out.println("Algorithm:" + secureRandom.getAlgorithm());
+    switch(method) {
+      case "seed": secureRandom.generateSeed(amount); break;
+      case "bytes": secureRandom.nextBytes(new byte[amount]); break;
+      case "long": secureRandom.nextLong(); break;
+      case "int": secureRandom.nextInt(); break;
+      default: throw new IllegalArgumentException("Not supported random function: " + method);
+    }
+    System.out.println("SecureRandom function invoked");
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0445f820/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
----------------------------------------------------------------------
diff --git a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
index f82ccc6..be8e96d 100644
--- a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
+++ b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
@@ -39,6 +39,7 @@ import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
+import java.security.SecureRandom;
 import java.time.Instant;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -2431,6 +2432,14 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase {
     }
   }
 
+  @BeforeClass
+  public static void assertNonBlockingRandomGeneratorAvailable() {
+    if(Boolean.parseBoolean(System.getProperty("test.solr.allow.any.securerandom","false")))
+      return;
+    // Use -Djava.security.egd=file:/dev/./urandom VM option if you hit this 
+    assertEquals("SHA1PRNG", new SecureRandom().getAlgorithm());
+  }
+  
   @AfterClass
   public static void unchooseMPForMP() {
     System.clearProperty(SYSTEM_PROPERTY_SOLR_TESTS_USEMERGEPOLICYFACTORY);


[20/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-10147: Admin UI -> Cloud -> Graph: Impossible to see shard state

Posted by ab...@apache.org.
SOLR-10147: Admin UI -> Cloud -> Graph: Impossible to see shard state


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/a6f27d3e
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/a6f27d3e
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/a6f27d3e

Branch: refs/heads/jira/solr-9959
Commit: a6f27d3e1e457f9bc3bd4b9afcacfc37e9a87e80
Parents: 9b7c8d5
Author: Jan Høydahl <ja...@apache.org>
Authored: Wed Mar 29 10:51:34 2017 +0200
Committer: Jan Høydahl <ja...@apache.org>
Committed: Wed Mar 29 10:54:02 2017 +0200

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  2 ++
 solr/webapp/web/css/angular/cloud.css           | 24 ++++++++++++++++++++
 solr/webapp/web/js/angular/controllers/cloud.js | 19 +++++++++++-----
 solr/webapp/web/partials/cloud.html             |  1 +
 4 files changed, 40 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6f27d3e/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 80c2aa0..3692bd5 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -171,6 +171,8 @@ Other Changes
 * SOLR-10357: Enable edismax and standard query parsers to handle the option combination 
   sow=false / autoGeneratePhraseQueries="true" by setting QueryBuilder.autoGenerateMultiTermSynonymsQuery.
 
+* SOLR-10147: Admin UI -> Cloud -> Graph: Impossible to see shard state (Amrit Sarkar, janhoy)
+
 ==================  6.5.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6f27d3e/solr/webapp/web/css/angular/cloud.css
----------------------------------------------------------------------
diff --git a/solr/webapp/web/css/angular/cloud.css b/solr/webapp/web/css/angular/cloud.css
index 4017c22..80e53e8 100644
--- a/solr/webapp/web/css/angular/cloud.css
+++ b/solr/webapp/web/css/angular/cloud.css
@@ -331,11 +331,13 @@ limitations under the License.
 {
   color: #C43C35;
   stroke: #C43C35;
+  font-style: italic; 
 }
 
 #content #graph-content .node.recovery_failed text
 {
   fill: #C43C35;
+  font-style: italic; 
 }
 
 #content #cloud #legend .down,
@@ -357,11 +359,13 @@ limitations under the License.
 {
   color: #d5dd00;
   stroke: #d5dd00;
+  font-style: italic; 
 }
 
 #content #graph-content .node.recovering text
 {
   fill: #d5dd00;
+  font-style: italic; 
 }
 
 #content #cloud #legend .active,
@@ -394,6 +398,12 @@ limitations under the License.
   stroke: #c0c0c0;
 }
 
+#content #cloud #legend .leader,
+#content #graph-content .leader text
+{
+  font-weight: bold;
+}
+
 #content #graph-content .node.lvl-0 circle
 {
   stroke: #fff;
@@ -410,3 +420,17 @@ limitations under the License.
   padding-top: 15px;
   padding-bottom: 15px;
 }
+
+#content #cloud #legend .shard-inactive,
+#content #cloud #legend .shard-inactive li,
+#content #cloud #legend .shard-inactive li text,
+#content #graph-content .shard-inactive text
+{
+  text-decoration: line-through;
+}
+#content #cloud #legend .shard-inactive circle,
+#content #graph-content .shard-inactive circle,
+#content #graph-content .link.shard-inactive
+{
+  stroke: #e9e9e9;
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6f27d3e/solr/webapp/web/js/angular/controllers/cloud.js
----------------------------------------------------------------------
diff --git a/solr/webapp/web/js/angular/controllers/cloud.js b/solr/webapp/web/js/angular/controllers/cloud.js
index 2d0dae0..aeaf5d3 100644
--- a/solr/webapp/web/js/angular/controllers/cloud.js
+++ b/solr/webapp/web/js/angular/controllers/cloud.js
@@ -139,6 +139,8 @@ var graphSubController = function ($scope, Zookeeper, isRadial) {
                     for (var c in state) {
                         var shards = [];
                         for (var s in state[c].shards) {
+                            var shard_status = state[c].shards[s].state;
+                            shard_status = shard_status == 'inactive' ? 'shard-inactive' : shard_status;
                             var nodes = [];
                             for (var n in state[c].shards[s].replicas) {
                                 leaf_count++;
@@ -160,17 +162,19 @@ var graphSubController = function ($scope, Zookeeper, isRadial) {
                                 $scope.helperData.port.push(uri_parts.port);
                                 $scope.helperData.pathname.push(uri_parts.pathname);
 
-                                var status = replica.state;
+                                var replica_status = replica.state;
 
                                 if (!live_nodes[replica.node_name]) {
-                                    status = 'gone';
+                                    replica_status = 'gone';
+                                } else if(shard_status=='shard-inactive') {
+                                    replica_status += ' ' + shard_status;
                                 }
 
                                 var node = {
                                     name: uri,
                                     data: {
                                         type: 'node',
-                                        state: status,
+                                        state: replica_status,
                                         leader: 'true' === replica.leader,
                                         uri: uri_parts
                                     }
@@ -179,9 +183,10 @@ var graphSubController = function ($scope, Zookeeper, isRadial) {
                             }
 
                             var shard = {
-                                name: s,
+                                name: shard_status == "shard-inactive" ? s + ' (inactive)' : s,
                                 data: {
-                                    type: 'shard'
+                                    type: 'shard',
+                                    state: shard_status
                                 },
                                 children: nodes
                             };
@@ -280,7 +285,9 @@ solrAdminApp.directive('graph', function(Constants) {
                 }
 
                 if (d.data && d.data.state) {
-                    classes.push(d.data.state);
+                    if(!(d.data.type=='shard' && d.data.state=='active')){
+                        classes.push(d.data.state);
+                    }
                 }
 
                 return classes.join(' ');

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/a6f27d3e/solr/webapp/web/partials/cloud.html
----------------------------------------------------------------------
diff --git a/solr/webapp/web/partials/cloud.html b/solr/webapp/web/partials/cloud.html
index 415e123..ea1c6a0 100644
--- a/solr/webapp/web/partials/cloud.html
+++ b/solr/webapp/web/partials/cloud.html
@@ -59,6 +59,7 @@ limitations under the License.
           <li class="recovering"><svg width="15" height="15"><g transform="translate(5,2)"><g transform="translate(0,5)"><circle r="4.5"></circle></g></g></svg> Recovering</li>
           <li class="down"><svg width="15" height="15"><g transform="translate(5,2)"><g transform="translate(0,5)"><circle r="4.5"></circle></g></g></svg> Down</li>
           <li class="recovery_failed"><svg width="15" height="15"><g transform="translate(5,2)"><g transform="translate(0,5)"><circle r="4.5"></circle></g></g></svg> Recovery Failed</li>
+          <li class="shard-inactive"><svg width="15" height="15"><g transform="translate(5,2)"><g transform="translate(0,5)"><circle r="4.5"></circle></g></g></svg> Inactive</li>
           <li class="gone"><svg width="15" height="15"><g transform="translate(5,2)"><g transform="translate(0,5)"><circle r="4.5"></circle></g></g></svg> Gone</li>
         </ul>
       </div>


[02/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-10344: Update Solr default/example and test configs to use WordDelimiterGraphFilterFactory

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/core/src/test-files/solr/configsets/cloud-dynamic/conf/schema.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/configsets/cloud-dynamic/conf/schema.xml b/solr/core/src/test-files/solr/configsets/cloud-dynamic/conf/schema.xml
index af201c0..d15c199 100644
--- a/solr/core/src/test-files/solr/configsets/cloud-dynamic/conf/schema.xml
+++ b/solr/core/src/test-files/solr/configsets/cloud-dynamic/conf/schema.xml
@@ -45,26 +45,41 @@
   <fieldType name="failtype1" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
               catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
   </fieldType>
 
   <!-- Demonstrating ignoreCaseChange -->
   <fieldType name="wdf_nocase" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+              catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
   </fieldType>
 
   <fieldType name="wdf_preserve" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
+              catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -86,14 +101,15 @@
   <fieldType name="text" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
@@ -111,14 +127,15 @@
   <fieldType name="text_np" class="solr.TextField" positionIncrementGap="100">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/core/src/test-files/solr/configsets/doc-expiry/conf/schema.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/configsets/doc-expiry/conf/schema.xml b/solr/core/src/test-files/solr/configsets/doc-expiry/conf/schema.xml
index 8400fe8..c622eba 100644
--- a/solr/core/src/test-files/solr/configsets/doc-expiry/conf/schema.xml
+++ b/solr/core/src/test-files/solr/configsets/doc-expiry/conf/schema.xml
@@ -45,26 +45,41 @@
   <fieldType name="failtype1" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
               catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
   </fieldType>
 
   <!-- Demonstrating ignoreCaseChange -->
   <fieldType name="wdf_nocase" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+              catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
   </fieldType>
 
   <fieldType name="wdf_preserve" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
+              catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -86,14 +101,15 @@
   <fieldType name="text" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
@@ -111,14 +127,15 @@
   <fieldType name="text_np" class="solr.TextField" positionIncrementGap="100">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/core/src/test/org/apache/solr/ConvertedLegacyTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/ConvertedLegacyTest.java b/solr/core/src/test/org/apache/solr/ConvertedLegacyTest.java
index 767b811..bf7925a 100644
--- a/solr/core/src/test/org/apache/solr/ConvertedLegacyTest.java
+++ b/solr/core/src/test/org/apache/solr/ConvertedLegacyTest.java
@@ -872,7 +872,7 @@ public class ConvertedLegacyTest extends SolrTestCaseJ4 {
             );
 
 
-    // intra-word delimiter testing (WordDelimiterFilter)
+    // intra-word delimiter testing (WordDelimiterGraphFilter)
 
     assertU("<add><doc><field name=\"id\">42</field><field name=\"subword\">foo bar</field></doc></add>");
     assertU("<commit/>");

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/core/src/test/org/apache/solr/handler/FieldAnalysisRequestHandlerTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/handler/FieldAnalysisRequestHandlerTest.java b/solr/core/src/test/org/apache/solr/handler/FieldAnalysisRequestHandlerTest.java
index d2ef555..fc0f6be 100644
--- a/solr/core/src/test/org/apache/solr/handler/FieldAnalysisRequestHandlerTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/FieldAnalysisRequestHandlerTest.java
@@ -382,7 +382,7 @@ public class FieldAnalysisRequestHandlerTest extends AnalysisRequestHandlerTestB
   }
 
   @Test
-  public void testPositionHistoryWithWDF() throws Exception {
+  public void testPositionHistoryWithWDGF() throws Exception {
 
     FieldAnalysisRequest request = new FieldAnalysisRequest();
     request.addFieldType("skutype1");
@@ -407,12 +407,12 @@ public class FieldAnalysisRequestHandlerTest extends AnalysisRequestHandlerTestB
     assertToken(tokenList.get(1), new TokenInfo("3456-12", null, "word", 4, 11, 2, new int[]{2}, null, false));
     assertToken(tokenList.get(2), new TokenInfo("a", null, "word", 12, 13, 3, new int[]{3}, null, false));
     assertToken(tokenList.get(3), new TokenInfo("Test", null, "word", 14, 18, 4, new int[]{4}, null, false));
-    tokenList = indexPart.get("org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter");
-    assertNotNull("Expcting WordDelimiterFilter analysis breakdown", tokenList);
+    tokenList = indexPart.get("org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilter");
+    assertNotNull("Expcting WordDelimiterGraphFilter analysis breakdown", tokenList);
     assertEquals(6, tokenList.size());
     assertToken(tokenList.get(0), new TokenInfo("hi", null, "word", 0, 2, 1, new int[]{1,1}, null, false));
-    assertToken(tokenList.get(1), new TokenInfo("3456", null, "word", 4, 8, 2, new int[]{2,2}, null, false));
-    assertToken(tokenList.get(2), new TokenInfo("345612", null, "word", 4, 11, 2, new int[]{2,2}, null, false));
+    assertToken(tokenList.get(1), new TokenInfo("345612", null, "word", 4, 11, 2, new int[]{2,2}, null, false));
+    assertToken(tokenList.get(2), new TokenInfo("3456", null, "word", 4, 8, 2, new int[]{2,2}, null, false));
     assertToken(tokenList.get(3), new TokenInfo("12", null, "word", 9, 11, 3, new int[]{2,3}, null, false));
     assertToken(tokenList.get(4), new TokenInfo("a", null, "word", 12, 13, 4, new int[]{3,4}, null, false));
     assertToken(tokenList.get(5), new TokenInfo("Test", null, "word", 14, 18, 5, new int[]{4,5}, null, false));
@@ -420,8 +420,8 @@ public class FieldAnalysisRequestHandlerTest extends AnalysisRequestHandlerTestB
     assertNotNull("Expcting LowerCaseFilter analysis breakdown", tokenList);
     assertEquals(6, tokenList.size());
     assertToken(tokenList.get(0), new TokenInfo("hi", null, "word", 0, 2, 1, new int[]{1,1,1}, null, false));
-    assertToken(tokenList.get(1), new TokenInfo("3456", null, "word", 4, 8, 2, new int[]{2,2,2}, null, false));
-    assertToken(tokenList.get(2), new TokenInfo("345612", null, "word", 4, 11, 2, new int[]{2,2,2}, null, false));
+    assertToken(tokenList.get(1), new TokenInfo("345612", null, "word", 4, 11, 2, new int[]{2,2,2}, null, false));
+    assertToken(tokenList.get(2), new TokenInfo("3456", null, "word", 4, 8, 2, new int[]{2,2,2}, null, false));
     assertToken(tokenList.get(3), new TokenInfo("12", null, "word", 9, 11, 3, new int[]{2,3,3}, null, false));
     assertToken(tokenList.get(4), new TokenInfo("a", null, "word", 12, 13, 4, new int[]{3,4,4}, null, false));
     assertToken(tokenList.get(5), new TokenInfo("test", null, "word", 14, 18, 5, new int[]{4,5,5}, null, false));

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/core/src/test/org/apache/solr/rest/schema/TestBulkSchemaAPI.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/rest/schema/TestBulkSchemaAPI.java b/solr/core/src/test/org/apache/solr/rest/schema/TestBulkSchemaAPI.java
index 166d1fc..9f37967 100644
--- a/solr/core/src/test/org/apache/solr/rest/schema/TestBulkSchemaAPI.java
+++ b/solr/core/src/test/org/apache/solr/rest/schema/TestBulkSchemaAPI.java
@@ -396,7 +396,7 @@ public class TestBulkSchemaAPI extends RestTestBase {
         "                       'name' : 'myNewTxtField',\n" +
         "                       'class':'solr.TextField',\n" +
         "                       'positionIncrementGap':'100',\n" +
-        "                       'analyzer' : {\n" +
+        "                       'indexAnalyzer' : {\n" +
         "                               'charFilters':[\n" +
         "                                          {\n" +
         "                                           'class':'solr.PatternReplaceCharFilterFactory',\n" +
@@ -407,7 +407,32 @@ public class TestBulkSchemaAPI extends RestTestBase {
         "                               'tokenizer':{'class':'solr.WhitespaceTokenizerFactory'},\n" +
         "                               'filters':[\n" +
         "                                          {\n" +
-        "                                           'class':'solr.WordDelimiterFilterFactory',\n" +
+        "                                           'class':'solr.WordDelimiterGraphFilterFactory',\n" +
+        "                                           'preserveOriginal':'0'\n" +
+        "                                          },\n" +
+        "                                          {\n" +
+        "                                           'class':'solr.StopFilterFactory',\n" +
+        "                                           'words':'stopwords.txt',\n" +
+        "                                           'ignoreCase':'true'\n" +
+        "                                          },\n" +
+        "                                          {'class':'solr.LowerCaseFilterFactory'},\n" +
+        "                                          {'class':'solr.ASCIIFoldingFilterFactory'},\n" +
+        "                                          {'class':'solr.KStemFilterFactory'},\n" +
+        "                                          {'class':'solr.FlattenGraphFilterFactory'}\n" +
+        "                                         ]\n" +
+        "                               },\n" +
+        "                       'queryAnalyzer' : {\n" +
+        "                               'charFilters':[\n" +
+        "                                          {\n" +
+        "                                           'class':'solr.PatternReplaceCharFilterFactory',\n" +
+        "                                           'replacement':'$1$1',\n" +
+        "                                           'pattern':'([a-zA-Z])\\\\\\\\1+'\n" +
+        "                                          }\n" +
+        "                                         ],\n" +
+        "                               'tokenizer':{'class':'solr.WhitespaceTokenizerFactory'},\n" +
+        "                               'filters':[\n" +
+        "                                          {\n" +
+        "                                           'class':'solr.WordDelimiterGraphFilterFactory',\n" +
         "                                           'preserveOriginal':'0'\n" +
         "                                          },\n" +
         "                                          {\n" +

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/example/example-DIH/solr/db/conf/managed-schema
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/db/conf/managed-schema b/solr/example/example-DIH/solr/db/conf/managed-schema
index eead56f..1a1012f 100644
--- a/solr/example/example-DIH/solr/db/conf/managed-schema
+++ b/solr/example/example-DIH/solr/db/conf/managed-schema
@@ -500,7 +500,7 @@
     <!-- A text field with defaults appropriate for English, plus
    aggressive word-splitting and autophrase features enabled.
    This field is just like text_en, except it adds
-   WordDelimiterFilter to enable splitting and matching of
+   WordDelimiterGraphFilter to enable splitting and matching of
    words on case-change, alpha numeric boundaries, and
    non-alphanumeric chars.  This means certain compound word
    cases will work, for example query "wi fi" will match
@@ -518,10 +518,11 @@
                 ignoreCase="true"
                 words="lang/stopwords_en.txt"
                 />
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
         <filter class="solr.PorterStemFilterFactory"/>
+        <filter class="solr.FlattenGraphFilterFactory" />
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
@@ -530,7 +531,7 @@
                 ignoreCase="true"
                 words="lang/stopwords_en.txt"
                 />
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
         <filter class="solr.PorterStemFilterFactory"/>
@@ -540,16 +541,29 @@
     <!-- Less flexible matching, but less false matches.  Probably not ideal for product names,
          but may be good for SKUs.  Can insert dashes in the wrong place and still match. -->
     <fieldType name="text_en_splitting_tight" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
-      <analyzer>
+      <analyzer type="index">
+        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
+        <filter class="solr.EnglishMinimalStemFilterFactory"/>
+        <!-- this filter can remove any duplicate tokens that appear at the same position - sometimes
+             possible with WordDelimiterGraphFilter in conjunction with stemming. -->
+        <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+        <filter class="solr.FlattenGraphFilterFactory" />
+      </analyzer>
+      <analyzer type="query">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
         <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
         <filter class="solr.EnglishMinimalStemFilterFactory"/>
         <!-- this filter can remove any duplicate tokens that appear at the same position - sometimes
-             possible with WordDelimiterFilter in conjuncton with stemming. -->
+             possible with WordDelimiterGraphFilter in conjunction with stemming. -->
         <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
       </analyzer>
     </fieldType>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/example/example-DIH/solr/db/conf/synonyms.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/db/conf/synonyms.txt b/solr/example/example-DIH/solr/db/conf/synonyms.txt
index 7f72128..eab4ee8 100644
--- a/solr/example/example-DIH/solr/db/conf/synonyms.txt
+++ b/solr/example/example-DIH/solr/db/conf/synonyms.txt
@@ -21,7 +21,7 @@ fooaaa,baraaa,bazaaa
 GB,gib,gigabyte,gigabytes
 MB,mib,megabyte,megabytes
 Television, Televisions, TV, TVs
-#notice we use "gib" instead of "GiB" so any WordDelimiterFilter coming
+#notice we use "gib" instead of "GiB" so any WordDelimiterGraphFilter coming
 #after us won't split it into two words.
 
 # Synonym mappings can be used for spelling correction too

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/example/example-DIH/solr/mail/conf/managed-schema
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/mail/conf/managed-schema b/solr/example/example-DIH/solr/mail/conf/managed-schema
index 076f83f..016f105 100644
--- a/solr/example/example-DIH/solr/mail/conf/managed-schema
+++ b/solr/example/example-DIH/solr/mail/conf/managed-schema
@@ -419,7 +419,7 @@
     <!-- A text field with defaults appropriate for English, plus
    aggressive word-splitting and autophrase features enabled.
    This field is just like text_en, except it adds
-   WordDelimiterFilter to enable splitting and matching of
+   WordDelimiterGraphFilter to enable splitting and matching of
    words on case-change, alpha numeric boundaries, and
    non-alphanumeric chars.  This means certain compound word
    cases will work, for example query "wi fi" will match
@@ -437,10 +437,11 @@
                 ignoreCase="true"
                 words="lang/stopwords_en.txt"
                 />
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
         <filter class="solr.PorterStemFilterFactory"/>
+        <filter class="solr.FlattenGraphFilterFactory" />
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
@@ -449,7 +450,7 @@
                 ignoreCase="true"
                 words="lang/stopwords_en.txt"
                 />
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
         <filter class="solr.PorterStemFilterFactory"/>
@@ -459,16 +460,29 @@
     <!-- Less flexible matching, but less false matches.  Probably not ideal for product names,
          but may be good for SKUs.  Can insert dashes in the wrong place and still match. -->
     <fieldType name="text_en_splitting_tight" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
-      <analyzer>
+      <analyzer type="index">
+        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
+        <filter class="solr.EnglishMinimalStemFilterFactory"/>
+        <!-- this filter can remove any duplicate tokens that appear at the same position - sometimes
+             possible with WordDelimiterGraphFilter in conjunction with stemming. -->
+        <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+        <filter class="solr.FlattenGraphFilterFactory" />
+      </analyzer>
+      <analyzer type="query">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
         <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
         <filter class="solr.EnglishMinimalStemFilterFactory"/>
         <!-- this filter can remove any duplicate tokens that appear at the same position - sometimes
-             possible with WordDelimiterFilter in conjuncton with stemming. -->
+             possible with WordDelimiterGraphFilter in conjunction with stemming. -->
         <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
       </analyzer>
     </fieldType>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/example/example-DIH/solr/mail/conf/synonyms.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/mail/conf/synonyms.txt b/solr/example/example-DIH/solr/mail/conf/synonyms.txt
index 7f72128..eab4ee8 100644
--- a/solr/example/example-DIH/solr/mail/conf/synonyms.txt
+++ b/solr/example/example-DIH/solr/mail/conf/synonyms.txt
@@ -21,7 +21,7 @@ fooaaa,baraaa,bazaaa
 GB,gib,gigabyte,gigabytes
 MB,mib,megabyte,megabytes
 Television, Televisions, TV, TVs
-#notice we use "gib" instead of "GiB" so any WordDelimiterFilter coming
+#notice we use "gib" instead of "GiB" so any WordDelimiterGraphFilter coming
 #after us won't split it into two words.
 
 # Synonym mappings can be used for spelling correction too

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/example/example-DIH/solr/rss/conf/managed-schema
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/managed-schema b/solr/example/example-DIH/solr/rss/conf/managed-schema
index e35f49d..2064c58 100644
--- a/solr/example/example-DIH/solr/rss/conf/managed-schema
+++ b/solr/example/example-DIH/solr/rss/conf/managed-schema
@@ -242,18 +242,19 @@
         <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
         -->
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
         <filter class="solr.PorterStemFilterFactory"/>
         <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+        <filter class="solr.FlattenGraphFilterFactory" />
       </analyzer>
       <analyzer type="query">
         <charFilter class="solr.HTMLStripCharFilterFactory"/>
         <tokenizer class="solr.StandardTokenizerFactory"/>
         <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
         <filter class="solr.PorterStemFilterFactory"/>
@@ -450,7 +451,7 @@
     <!-- A text field with defaults appropriate for English, plus
    aggressive word-splitting and autophrase features enabled.
    This field is just like text_en, except it adds
-   WordDelimiterFilter to enable splitting and matching of
+   WordDelimiterGraphFilter to enable splitting and matching of
    words on case-change, alpha numeric boundaries, and
    non-alphanumeric chars.  This means certain compound word
    cases will work, for example query "wi fi" will match
@@ -468,10 +469,11 @@
                 ignoreCase="true"
                 words="lang/stopwords_en.txt"
                 />
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
         <filter class="solr.PorterStemFilterFactory"/>
+        <filter class="solr.FlattenGraphFilterFactory" />
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
@@ -480,7 +482,7 @@
                 ignoreCase="true"
                 words="lang/stopwords_en.txt"
                 />
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
         <filter class="solr.PorterStemFilterFactory"/>
@@ -490,16 +492,29 @@
     <!-- Less flexible matching, but less false matches.  Probably not ideal for product names,
          but may be good for SKUs.  Can insert dashes in the wrong place and still match. -->
     <fieldType name="text_en_splitting_tight" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
-      <analyzer>
+      <analyzer type="index">
+        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
+        <filter class="solr.EnglishMinimalStemFilterFactory"/>
+        <!-- this filter can remove any duplicate tokens that appear at the same position - sometimes
+             possible with WordDelimiterGraphFilter in conjunction with stemming. -->
+        <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+        <filter class="solr.FlattenGraphFilterFactory" />
+      </analyzer>
+      <analyzer type="query">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
         <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
         <filter class="solr.EnglishMinimalStemFilterFactory"/>
         <!-- this filter can remove any duplicate tokens that appear at the same position - sometimes
-             possible with WordDelimiterFilter in conjuncton with stemming. -->
+             possible with WordDelimiterGraphFilter in conjunction with stemming. -->
         <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
       </analyzer>
     </fieldType>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/example/example-DIH/solr/rss/conf/synonyms.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/synonyms.txt b/solr/example/example-DIH/solr/rss/conf/synonyms.txt
index 7f72128..eab4ee8 100644
--- a/solr/example/example-DIH/solr/rss/conf/synonyms.txt
+++ b/solr/example/example-DIH/solr/rss/conf/synonyms.txt
@@ -21,7 +21,7 @@ fooaaa,baraaa,bazaaa
 GB,gib,gigabyte,gigabytes
 MB,mib,megabyte,megabytes
 Television, Televisions, TV, TVs
-#notice we use "gib" instead of "GiB" so any WordDelimiterFilter coming
+#notice we use "gib" instead of "GiB" so any WordDelimiterGraphFilter coming
 #after us won't split it into two words.
 
 # Synonym mappings can be used for spelling correction too

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/example/example-DIH/solr/solr/conf/managed-schema
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/solr/conf/managed-schema b/solr/example/example-DIH/solr/solr/conf/managed-schema
index 6be0ad9..04c85c0 100644
--- a/solr/example/example-DIH/solr/solr/conf/managed-schema
+++ b/solr/example/example-DIH/solr/solr/conf/managed-schema
@@ -500,7 +500,7 @@
     <!-- A text field with defaults appropriate for English, plus
    aggressive word-splitting and autophrase features enabled.
    This field is just like text_en, except it adds
-   WordDelimiterFilter to enable splitting and matching of
+   WordDelimiterGraphFilter to enable splitting and matching of
    words on case-change, alpha numeric boundaries, and
    non-alphanumeric chars.  This means certain compound word
    cases will work, for example query "wi fi" will match
@@ -518,10 +518,11 @@
                 ignoreCase="true"
                 words="lang/stopwords_en.txt"
                 />
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
         <filter class="solr.PorterStemFilterFactory"/>
+        <filter class="solr.FlattenGraphFilterFactory" />
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
@@ -530,7 +531,7 @@
                 ignoreCase="true"
                 words="lang/stopwords_en.txt"
                 />
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
         <filter class="solr.PorterStemFilterFactory"/>
@@ -540,16 +541,29 @@
     <!-- Less flexible matching, but less false matches.  Probably not ideal for product names,
          but may be good for SKUs.  Can insert dashes in the wrong place and still match. -->
     <fieldType name="text_en_splitting_tight" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
-      <analyzer>
+      <analyzer type="index">
+        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
+        <filter class="solr.EnglishMinimalStemFilterFactory"/>
+        <!-- this filter can remove any duplicate tokens that appear at the same position - sometimes
+             possible with WordDelimiterGraphFilter in conjuncton with stemming. -->
+        <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+        <filter class="solr.FlattenGraphFilterFactory" />
+      </analyzer>
+      <analyzer type="query">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
         <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
         <filter class="solr.EnglishMinimalStemFilterFactory"/>
         <!-- this filter can remove any duplicate tokens that appear at the same position - sometimes
-             possible with WordDelimiterFilter in conjuncton with stemming. -->
+             possible with WordDelimiterGraphFilter in conjuncton with stemming. -->
         <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
       </analyzer>
     </fieldType>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/example/example-DIH/solr/solr/conf/synonyms.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/solr/conf/synonyms.txt b/solr/example/example-DIH/solr/solr/conf/synonyms.txt
index 7f72128..eab4ee8 100644
--- a/solr/example/example-DIH/solr/solr/conf/synonyms.txt
+++ b/solr/example/example-DIH/solr/solr/conf/synonyms.txt
@@ -21,7 +21,7 @@ fooaaa,baraaa,bazaaa
 GB,gib,gigabyte,gigabytes
 MB,mib,megabyte,megabytes
 Television, Televisions, TV, TVs
-#notice we use "gib" instead of "GiB" so any WordDelimiterFilter coming
+#notice we use "gib" instead of "GiB" so any WordDelimiterGraphFilter coming
 #after us won't split it into two words.
 
 # Synonym mappings can be used for spelling correction too

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/example/example-DIH/solr/tika/conf/managed-schema
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/tika/conf/managed-schema b/solr/example/example-DIH/solr/tika/conf/managed-schema
index c4dccb2..58b2a80 100644
--- a/solr/example/example-DIH/solr/tika/conf/managed-schema
+++ b/solr/example/example-DIH/solr/tika/conf/managed-schema
@@ -353,7 +353,7 @@
     <!-- A text field with defaults appropriate for English, plus
    aggressive word-splitting and autophrase features enabled.
    This field is just like text_en, except it adds
-   WordDelimiterFilter to enable splitting and matching of
+   WordDelimiterGraphFilter to enable splitting and matching of
    words on case-change, alpha numeric boundaries, and
    non-alphanumeric chars.  This means certain compound word
    cases will work, for example query "wi fi" will match
@@ -362,13 +362,14 @@
     <fieldType name="text_en_splitting" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
       <analyzer type="index">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.PorterStemFilterFactory"/>
+        <filter class="solr.FlattenGraphFilterFactory" />
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.PorterStemFilterFactory"/>
       </analyzer>
@@ -377,13 +378,23 @@
     <!-- Less flexible matching, but less false matches.  Probably not ideal for product names,
          but may be good for SKUs.  Can insert dashes in the wrong place and still match. -->
     <fieldType name="text_en_splitting_tight" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
-      <analyzer>
+      <analyzer type="index">
+        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.EnglishMinimalStemFilterFactory"/>
+        <!-- this filter can remove any duplicate tokens that appear at the same position - sometimes
+             possible with WordDelimiterGraphFilter in conjuncton with stemming. -->
+        <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+        <filter class="solr.FlattenGraphFilterFactory" />
+      </analyzer>
+      <analyzer type="query">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.EnglishMinimalStemFilterFactory"/>
         <!-- this filter can remove any duplicate tokens that appear at the same position - sometimes
-             possible with WordDelimiterFilter in conjuncton with stemming. -->
+             possible with WordDelimiterGraphFilter in conjuncton with stemming. -->
         <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
       </analyzer>
     </fieldType>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/example/files/conf/managed-schema
----------------------------------------------------------------------
diff --git a/solr/example/files/conf/managed-schema b/solr/example/files/conf/managed-schema
index e936bcd..ff209be 100644
--- a/solr/example/files/conf/managed-schema
+++ b/solr/example/files/conf/managed-schema
@@ -145,27 +145,39 @@
     <analyzer type="index">
       <tokenizer class="solr.WhitespaceTokenizerFactory"/>
       <filter class="solr.StopFilterFactory" words="lang/stopwords_en.txt" ignoreCase="true"/>
-      <filter class="solr.WordDelimiterFilterFactory" catenateNumbers="1" generateNumberParts="1" splitOnCaseChange="1" generateWordParts="1" catenateAll="0" catenateWords="1"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" catenateNumbers="1" generateNumberParts="1" splitOnCaseChange="1" generateWordParts="1" catenateAll="0" catenateWords="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory" />
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.WhitespaceTokenizerFactory"/>
       <filter class="solr.SynonymFilterFactory" expand="true" ignoreCase="true" synonyms="synonyms.txt"/>
       <filter class="solr.StopFilterFactory" words="lang/stopwords_en.txt" ignoreCase="true"/>
-      <filter class="solr.WordDelimiterFilterFactory" catenateNumbers="0" generateNumberParts="1" splitOnCaseChange="1" generateWordParts="1" catenateAll="0" catenateWords="0"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" catenateNumbers="0" generateNumberParts="1" splitOnCaseChange="1" generateWordParts="1" catenateAll="0" catenateWords="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
       <filter class="solr.PorterStemFilterFactory"/>
     </analyzer>
   </fieldType>
   <fieldType name="text_en_splitting_tight" class="solr.TextField" autoGeneratePhraseQueries="true" positionIncrementGap="100">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+      <filter class="solr.SynonymFilterFactory" expand="false" ignoreCase="true" synonyms="synonyms.txt"/>
+      <filter class="solr.StopFilterFactory" words="lang/stopwords_en.txt" ignoreCase="true"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" catenateNumbers="1" generateNumberParts="0" generateWordParts="0" catenateAll="0" catenateWords="1"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
+      <filter class="solr.EnglishMinimalStemFilterFactory"/>
+      <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory" />
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.WhitespaceTokenizerFactory"/>
       <filter class="solr.SynonymFilterFactory" expand="false" ignoreCase="true" synonyms="synonyms.txt"/>
       <filter class="solr.StopFilterFactory" words="lang/stopwords_en.txt" ignoreCase="true"/>
-      <filter class="solr.WordDelimiterFilterFactory" catenateNumbers="1" generateNumberParts="0" generateWordParts="0" catenateAll="0" catenateWords="1"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" catenateNumbers="1" generateNumberParts="0" generateWordParts="0" catenateAll="0" catenateWords="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
       <filter class="solr.EnglishMinimalStemFilterFactory"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/example/files/conf/synonyms.txt
----------------------------------------------------------------------
diff --git a/solr/example/files/conf/synonyms.txt b/solr/example/files/conf/synonyms.txt
index 7f72128..eab4ee8 100644
--- a/solr/example/files/conf/synonyms.txt
+++ b/solr/example/files/conf/synonyms.txt
@@ -21,7 +21,7 @@ fooaaa,baraaa,bazaaa
 GB,gib,gigabyte,gigabytes
 MB,mib,megabyte,megabytes
 Television, Televisions, TV, TVs
-#notice we use "gib" instead of "GiB" so any WordDelimiterFilter coming
+#notice we use "gib" instead of "GiB" so any WordDelimiterGraphFilter coming
 #after us won't split it into two words.
 
 # Synonym mappings can be used for spelling correction too

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/server/solr/configsets/basic_configs/conf/managed-schema
----------------------------------------------------------------------
diff --git a/solr/server/solr/configsets/basic_configs/conf/managed-schema b/solr/server/solr/configsets/basic_configs/conf/managed-schema
index d7aacc4..09aaae3 100644
--- a/solr/server/solr/configsets/basic_configs/conf/managed-schema
+++ b/solr/server/solr/configsets/basic_configs/conf/managed-schema
@@ -436,7 +436,7 @@
     <!-- A text field with defaults appropriate for English, plus
          aggressive word-splitting and autophrase features enabled.
          This field is just like text_en, except it adds
-         WordDelimiterFilter to enable splitting and matching of
+         WordDelimiterGraphFilter to enable splitting and matching of
          words on case-change, alpha numeric boundaries, and
          non-alphanumeric chars.  This means certain compound word
          cases will work, for example query "wi fi" will match
@@ -455,10 +455,11 @@
                 ignoreCase="true"
                 words="lang/stopwords_en.txt"
         />
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
         <filter class="solr.PorterStemFilterFactory"/>
+        <filter class="solr.FlattenGraphFilterFactory" />
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
@@ -467,7 +468,7 @@
                 ignoreCase="true"
                 words="lang/stopwords_en.txt"
         />
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
         <filter class="solr.PorterStemFilterFactory"/>
@@ -478,16 +479,29 @@
          but may be good for SKUs.  Can insert dashes in the wrong place and still match. -->
     <dynamicField name="*_txt_en_split_tight" type="text_en_splitting_tight"  indexed="true"  stored="true"/>
     <fieldType name="text_en_splitting_tight" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
-      <analyzer>
+      <analyzer type="index">
+        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
+        <filter class="solr.EnglishMinimalStemFilterFactory"/>
+        <!-- this filter can remove any duplicate tokens that appear at the same position - sometimes
+             possible with WordDelimiterGraphFilter in conjuncton with stemming. -->
+        <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+        <filter class="solr.FlattenGraphFilterFactory" />
+      </analyzer>
+      <analyzer type="query">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
         <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
         <filter class="solr.EnglishMinimalStemFilterFactory"/>
         <!-- this filter can remove any duplicate tokens that appear at the same position - sometimes
-             possible with WordDelimiterFilter in conjuncton with stemming. -->
+             possible with WordDelimiterGraphFilter in conjuncton with stemming. -->
         <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
       </analyzer>
     </fieldType>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/server/solr/configsets/basic_configs/conf/synonyms.txt
----------------------------------------------------------------------
diff --git a/solr/server/solr/configsets/basic_configs/conf/synonyms.txt b/solr/server/solr/configsets/basic_configs/conf/synonyms.txt
index 7f72128..eab4ee8 100644
--- a/solr/server/solr/configsets/basic_configs/conf/synonyms.txt
+++ b/solr/server/solr/configsets/basic_configs/conf/synonyms.txt
@@ -21,7 +21,7 @@ fooaaa,baraaa,bazaaa
 GB,gib,gigabyte,gigabytes
 MB,mib,megabyte,megabytes
 Television, Televisions, TV, TVs
-#notice we use "gib" instead of "GiB" so any WordDelimiterFilter coming
+#notice we use "gib" instead of "GiB" so any WordDelimiterGraphFilter coming
 #after us won't split it into two words.
 
 # Synonym mappings can be used for spelling correction too

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/server/solr/configsets/data_driven_schema_configs/conf/managed-schema
----------------------------------------------------------------------
diff --git a/solr/server/solr/configsets/data_driven_schema_configs/conf/managed-schema b/solr/server/solr/configsets/data_driven_schema_configs/conf/managed-schema
index b716f9c..0319eb0 100644
--- a/solr/server/solr/configsets/data_driven_schema_configs/conf/managed-schema
+++ b/solr/server/solr/configsets/data_driven_schema_configs/conf/managed-schema
@@ -435,7 +435,7 @@
     <!-- A text field with defaults appropriate for English, plus
          aggressive word-splitting and autophrase features enabled.
          This field is just like text_en, except it adds
-         WordDelimiterFilter to enable splitting and matching of
+         WordDelimiterGraphFilter to enable splitting and matching of
          words on case-change, alpha numeric boundaries, and
          non-alphanumeric chars.  This means certain compound word
          cases will work, for example query "wi fi" will match
@@ -454,10 +454,11 @@
                 ignoreCase="true"
                 words="lang/stopwords_en.txt"
         />
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
         <filter class="solr.PorterStemFilterFactory"/>
+        <filter class="solr.FlattenGraphFilterFactory" />
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
@@ -466,7 +467,7 @@
                 ignoreCase="true"
                 words="lang/stopwords_en.txt"
         />
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
         <filter class="solr.PorterStemFilterFactory"/>
@@ -477,16 +478,29 @@
          but may be good for SKUs.  Can insert dashes in the wrong place and still match. -->
     <dynamicField name="*_txt_en_split_tight" type="text_en_splitting_tight"  indexed="true"  stored="true"/>
     <fieldType name="text_en_splitting_tight" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
-      <analyzer>
+      <analyzer type="index">
+        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
+        <filter class="solr.EnglishMinimalStemFilterFactory"/>
+        <!-- this filter can remove any duplicate tokens that appear at the same position - sometimes
+             possible with WordDelimiterGraphFilter in conjuncton with stemming. -->
+        <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+        <filter class="solr.FlattenGraphFilterFactory" />
+      </analyzer>
+      <analyzer type="query">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
         <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
         <filter class="solr.EnglishMinimalStemFilterFactory"/>
         <!-- this filter can remove any duplicate tokens that appear at the same position - sometimes
-             possible with WordDelimiterFilter in conjuncton with stemming. -->
+             possible with WordDelimiterGraphFilter in conjuncton with stemming. -->
         <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
       </analyzer>
     </fieldType>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/server/solr/configsets/data_driven_schema_configs/conf/synonyms.txt
----------------------------------------------------------------------
diff --git a/solr/server/solr/configsets/data_driven_schema_configs/conf/synonyms.txt b/solr/server/solr/configsets/data_driven_schema_configs/conf/synonyms.txt
index 7f72128..eab4ee8 100644
--- a/solr/server/solr/configsets/data_driven_schema_configs/conf/synonyms.txt
+++ b/solr/server/solr/configsets/data_driven_schema_configs/conf/synonyms.txt
@@ -21,7 +21,7 @@ fooaaa,baraaa,bazaaa
 GB,gib,gigabyte,gigabytes
 MB,mib,megabyte,megabytes
 Television, Televisions, TV, TVs
-#notice we use "gib" instead of "GiB" so any WordDelimiterFilter coming
+#notice we use "gib" instead of "GiB" so any WordDelimiterGraphFilter coming
 #after us won't split it into two words.
 
 # Synonym mappings can be used for spelling correction too

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/server/solr/configsets/sample_techproducts_configs/conf/managed-schema
----------------------------------------------------------------------
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/managed-schema b/solr/server/solr/configsets/sample_techproducts_configs/conf/managed-schema
index 9caf3d6..17dadd4 100644
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/managed-schema
+++ b/solr/server/solr/configsets/sample_techproducts_configs/conf/managed-schema
@@ -544,7 +544,7 @@
     <!-- A text field with defaults appropriate for English, plus
    aggressive word-splitting and autophrase features enabled.
    This field is just like text_en, except it adds
-   WordDelimiterFilter to enable splitting and matching of
+   WordDelimiterGraphFilter to enable splitting and matching of
    words on case-change, alpha numeric boundaries, and
    non-alphanumeric chars.  This means certain compound word
    cases will work, for example query "wi fi" will match
@@ -562,10 +562,11 @@
                 ignoreCase="true"
                 words="lang/stopwords_en.txt"
                 />
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
         <filter class="solr.PorterStemFilterFactory"/>
+        <filter class="solr.FlattenGraphFilterFactory" />
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
@@ -574,7 +575,7 @@
                 ignoreCase="true"
                 words="lang/stopwords_en.txt"
                 />
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
         <filter class="solr.PorterStemFilterFactory"/>
@@ -584,16 +585,29 @@
     <!-- Less flexible matching, but less false matches.  Probably not ideal for product names,
          but may be good for SKUs.  Can insert dashes in the wrong place and still match. -->
     <fieldType name="text_en_splitting_tight" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
-      <analyzer>
+      <analyzer type="index">
+        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
+        <filter class="solr.EnglishMinimalStemFilterFactory"/>
+        <!-- this filter can remove any duplicate tokens that appear at the same position - sometimes
+             possible with WordDelimiterGraphFilter in conjuncton with stemming. -->
+        <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+        <filter class="solr.FlattenGraphFilterFactory" />
+      </analyzer>
+      <analyzer type="query">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
         <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
-        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
+        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
         <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
         <filter class="solr.EnglishMinimalStemFilterFactory"/>
         <!-- this filter can remove any duplicate tokens that appear at the same position - sometimes
-             possible with WordDelimiterFilter in conjuncton with stemming. -->
+             possible with WordDelimiterGraphFilter in conjuncton with stemming. -->
         <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
       </analyzer>
     </fieldType>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/server/solr/configsets/sample_techproducts_configs/conf/synonyms.txt
----------------------------------------------------------------------
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/synonyms.txt b/solr/server/solr/configsets/sample_techproducts_configs/conf/synonyms.txt
index 7f72128..eab4ee8 100644
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/synonyms.txt
+++ b/solr/server/solr/configsets/sample_techproducts_configs/conf/synonyms.txt
@@ -21,7 +21,7 @@ fooaaa,baraaa,bazaaa
 GB,gib,gigabyte,gigabytes
 MB,mib,megabyte,megabytes
 Television, Televisions, TV, TVs
-#notice we use "gib" instead of "GiB" so any WordDelimiterFilter coming
+#notice we use "gib" instead of "GiB" so any WordDelimiterGraphFilter coming
 #after us won't split it into two words.
 
 # Synonym mappings can be used for spelling correction too

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/solrj/src/test-files/solrj/solr/collection1/conf/schema-sql.xml
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test-files/solrj/solr/collection1/conf/schema-sql.xml b/solr/solrj/src/test-files/solrj/solr/collection1/conf/schema-sql.xml
index 56bf625..96bbcd8 100644
--- a/solr/solrj/src/test-files/solrj/solr/collection1/conf/schema-sql.xml
+++ b/solr/solrj/src/test-files/solrj/solr/collection1/conf/schema-sql.xml
@@ -72,26 +72,41 @@
   <fieldtype name="failtype1" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
               catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
   </fieldtype>
 
   <!-- Demonstrating ignoreCaseChange -->
   <fieldtype name="wdf_nocase" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+              catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
   </fieldtype>
 
   <fieldtype name="wdf_preserve" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
+              catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -191,9 +206,16 @@
     </analyzer>
   </fieldtype>
   <fieldtype name="lowerpunctfilt" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+              catenateNumbers="1" catenateAll="1" splitOnCaseChange="1"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="1" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -265,15 +287,16 @@
   <fieldtype name="subword" class="solr.TextField" multiValued="true" positionIncrementGap="100">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.StopFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.StopFilterFactory"/>
@@ -285,15 +308,16 @@
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1"
               generateNumberParts="0" catenateWords="0" catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.StopFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1"
               generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.StopFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
@@ -304,8 +328,9 @@
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1"
               generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
@@ -318,13 +343,14 @@
   <fieldtype name="skutype1" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -334,13 +360,14 @@
   <fieldtype name="skutype2" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>


[44/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-10356: Adds basic math streaming evaluators

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/CubedRootEvaluatorTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/CubedRootEvaluatorTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/CubedRootEvaluatorTest.java
new file mode 100644
index 0000000..0a7f3de
--- /dev/null
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/CubedRootEvaluatorTest.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.io.stream.eval;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.eval.CubedRootEvaluator;
+import org.apache.solr.client.solrj.io.eval.StreamEvaluator;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+import org.junit.Test;
+
+import junit.framework.Assert;
+
+public class CubedRootEvaluatorTest extends LuceneTestCase {
+
+  StreamFactory factory;
+  Map<String, Object> values;
+  
+  public CubedRootEvaluatorTest() {
+    super();
+    
+    factory = new StreamFactory()
+      .withFunctionName("cbrt", CubedRootEvaluator.class);
+    values = new HashMap<String,Object>();
+  }
+  
+  private void test(Double value) throws IOException{
+    StreamEvaluator evaluator = factory.constructEvaluator("cbrt(a)");
+    
+    values.clear();
+    values.put("a", value);
+    Object result = evaluator.evaluate(new Tuple(values));
+    
+    if(null == value){
+      Assert.assertNull(result);
+    }
+    else{
+      Assert.assertTrue(result instanceof Double);
+      Assert.assertEquals(Math.cbrt(value), result);
+    }
+  }
+    
+  @Test
+  public void oneField() throws Exception{
+    test(90D);
+    test(45D);
+    test(12.4D);
+    test(-45D);
+  }
+
+  @Test(expected = IOException.class)
+  public void noField() throws Exception{
+    factory.constructEvaluator("cbrt()");
+  }
+  
+  @Test(expected = IOException.class)
+  public void twoFields() throws Exception{
+    factory.constructEvaluator("cbrt(a,b)");
+  }
+  
+  @Test
+  public void noValue() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("cbrt(a)");
+    
+    values.clear();
+    Object result = evaluator.evaluate(new Tuple(values));
+    assertNull(result);
+  }
+  @Test
+  public void nullValue() throws Exception{
+    test(null);
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/FloorEvaluatorTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/FloorEvaluatorTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/FloorEvaluatorTest.java
new file mode 100644
index 0000000..0fbf16d
--- /dev/null
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/FloorEvaluatorTest.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.io.stream.eval;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.eval.FloorEvaluator;
+import org.apache.solr.client.solrj.io.eval.StreamEvaluator;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+import org.junit.Test;
+
+import junit.framework.Assert;
+
+public class FloorEvaluatorTest extends LuceneTestCase {
+
+  StreamFactory factory;
+  Map<String, Object> values;
+  
+  public FloorEvaluatorTest() {
+    super();
+    
+    factory = new StreamFactory()
+      .withFunctionName("floor", FloorEvaluator.class);
+    values = new HashMap<String,Object>();
+  }
+    
+  @Test
+  public void floorOneField() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("floor(a)");
+    Object result;
+    
+    values.clear();
+    values.put("a", 1);
+    result = evaluator.evaluate(new Tuple(values));
+    Assert.assertTrue(result instanceof Long);
+    Assert.assertEquals(1L, result);
+    
+    values.clear();
+    values.put("a", 1.1);
+    result = evaluator.evaluate(new Tuple(values));
+    Assert.assertTrue(result instanceof Long);
+    Assert.assertEquals(1L, result);
+    
+    values.clear();
+    values.put("a", -1.1);
+    result = evaluator.evaluate(new Tuple(values));
+    Assert.assertTrue(result instanceof Long);
+    Assert.assertEquals(-2L, result);
+  }
+
+  @Test(expected = IOException.class)
+  public void floorNoField() throws Exception{
+    factory.constructEvaluator("floor()");
+  }
+  
+  @Test(expected = IOException.class)
+  public void floorTwoFields() throws Exception{
+    factory.constructEvaluator("floor(a,b)");
+  }
+  
+  @Test
+  public void floorNoValue() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("floor(a)");
+    
+    values.clear();
+    Object result = evaluator.evaluate(new Tuple(values));
+    assertNull(result);
+  }
+  @Test
+  public void floorNullValue() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("floor(a)");
+    
+    values.clear();
+    values.put("a", null);
+    Object result = evaluator.evaluate(new Tuple(values));
+    assertNull(result);
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/HyperbolicCosineEvaluatorTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/HyperbolicCosineEvaluatorTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/HyperbolicCosineEvaluatorTest.java
new file mode 100644
index 0000000..7847f30
--- /dev/null
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/HyperbolicCosineEvaluatorTest.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.io.stream.eval;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.eval.HyperbolicCosineEvaluator;
+import org.apache.solr.client.solrj.io.eval.StreamEvaluator;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+import org.junit.Test;
+
+import junit.framework.Assert;
+
+public class HyperbolicCosineEvaluatorTest extends LuceneTestCase {
+
+  StreamFactory factory;
+  Map<String, Object> values;
+  
+  public HyperbolicCosineEvaluatorTest() {
+    super();
+    
+    factory = new StreamFactory()
+      .withFunctionName("cosh", HyperbolicCosineEvaluator.class);
+    values = new HashMap<String,Object>();
+  }
+  
+  private void test(Double value) throws IOException{
+    StreamEvaluator evaluator = factory.constructEvaluator("cosh(a)");
+    
+    values.clear();
+    values.put("a", value);
+    Object result = evaluator.evaluate(new Tuple(values));
+    
+    if(null == value){
+      Assert.assertNull(result);
+    }
+    else{
+      Assert.assertTrue(result instanceof Double);
+      Assert.assertEquals(Math.cosh(value), result);
+    }
+  }
+    
+  @Test
+  public void oneField() throws Exception{
+    test(90D);
+    test(45D);
+    test(12.4D);
+    test(-45D);
+  }
+
+  @Test(expected = IOException.class)
+  public void noField() throws Exception{
+    factory.constructEvaluator("cosh()");
+  }
+  
+  @Test(expected = IOException.class)
+  public void twoFields() throws Exception{
+    factory.constructEvaluator("cosh(a,b)");
+  }
+  
+  @Test
+  public void noValue() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("cosh(a)");
+    
+    values.clear();
+    Object result = evaluator.evaluate(new Tuple(values));
+    assertNull(result);
+  }
+  @Test
+  public void nullValue() throws Exception{
+    test(null);
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/HyperbolicSineEvaluatorTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/HyperbolicSineEvaluatorTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/HyperbolicSineEvaluatorTest.java
new file mode 100644
index 0000000..22733cc
--- /dev/null
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/HyperbolicSineEvaluatorTest.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.io.stream.eval;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.eval.HyperbolicSineEvaluator;
+import org.apache.solr.client.solrj.io.eval.StreamEvaluator;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+import org.junit.Test;
+
+import junit.framework.Assert;
+
+public class HyperbolicSineEvaluatorTest extends LuceneTestCase {
+
+  StreamFactory factory;
+  Map<String, Object> values;
+  
+  public HyperbolicSineEvaluatorTest() {
+    super();
+    
+    factory = new StreamFactory()
+      .withFunctionName("sinh", HyperbolicSineEvaluator.class);
+    values = new HashMap<String,Object>();
+  }
+  
+  private void test(Double value) throws IOException{
+    StreamEvaluator evaluator = factory.constructEvaluator("sinh(a)");
+    
+    values.clear();
+    values.put("a", value);
+    Object result = evaluator.evaluate(new Tuple(values));
+    
+    if(null == value){
+      Assert.assertNull(result);
+    }
+    else{
+      Assert.assertTrue(result instanceof Double);
+      Assert.assertEquals(Math.sinh(value), result);
+    }
+  }
+    
+  @Test
+  public void oneField() throws Exception{
+    test(90D);
+    test(45D);
+    test(12.4D);
+    test(-45D);
+  }
+
+  @Test(expected = IOException.class)
+  public void noField() throws Exception{
+    factory.constructEvaluator("sinh()");
+  }
+  
+  @Test(expected = IOException.class)
+  public void twoFields() throws Exception{
+    factory.constructEvaluator("sinh(a,b)");
+  }
+  
+  @Test
+  public void noValue() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("sinh(a)");
+    
+    values.clear();
+    Object result = evaluator.evaluate(new Tuple(values));
+    assertNull(result);
+  }
+  @Test
+  public void nullValue() throws Exception{
+    test(null);
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/HyperbolicTangentEvaluatorTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/HyperbolicTangentEvaluatorTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/HyperbolicTangentEvaluatorTest.java
new file mode 100644
index 0000000..e526a08
--- /dev/null
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/HyperbolicTangentEvaluatorTest.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.io.stream.eval;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.eval.HyperbolicTangentEvaluator;
+import org.apache.solr.client.solrj.io.eval.StreamEvaluator;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+import org.junit.Test;
+
+import junit.framework.Assert;
+
+public class HyperbolicTangentEvaluatorTest extends LuceneTestCase {
+
+  StreamFactory factory;
+  Map<String, Object> values;
+  
+  public HyperbolicTangentEvaluatorTest() {
+    super();
+    
+    factory = new StreamFactory()
+      .withFunctionName("tanh", HyperbolicTangentEvaluator.class);
+    values = new HashMap<String,Object>();
+  }
+  
+  private void test(Double value) throws IOException{
+    StreamEvaluator evaluator = factory.constructEvaluator("tanh(a)");
+    
+    values.clear();
+    values.put("a", value);
+    Object result = evaluator.evaluate(new Tuple(values));
+    
+    if(null == value){
+      Assert.assertNull(result);
+    }
+    else{
+      Assert.assertTrue(result instanceof Double);
+      Assert.assertEquals(Math.tanh(value), result);
+    }
+  }
+    
+  @Test
+  public void oneField() throws Exception{
+    test(90D);
+    test(45D);
+    test(12.4D);
+    test(-45D);
+  }
+
+  @Test(expected = IOException.class)
+  public void noField() throws Exception{
+    factory.constructEvaluator("tanh()");
+  }
+  
+  @Test(expected = IOException.class)
+  public void twoFields() throws Exception{
+    factory.constructEvaluator("tanh(a,b)");
+  }
+  
+  @Test
+  public void noValue() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("tanh(a)");
+    
+    values.clear();
+    Object result = evaluator.evaluate(new Tuple(values));
+    assertNull(result);
+  }
+  @Test
+  public void nullValue() throws Exception{
+    test(null);
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/ModuloEvaluatorTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/ModuloEvaluatorTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/ModuloEvaluatorTest.java
new file mode 100644
index 0000000..436763b
--- /dev/null
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/ModuloEvaluatorTest.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.io.stream.eval;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.eval.ModuloEvaluator;
+import org.apache.solr.client.solrj.io.eval.StreamEvaluator;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+import org.junit.Test;
+
+import junit.framework.Assert;
+
+public class ModuloEvaluatorTest extends LuceneTestCase {
+
+  StreamFactory factory;
+  Map<String, Object> values;
+  
+  public ModuloEvaluatorTest() {
+    super();
+    
+    factory = new StreamFactory()
+      .withFunctionName("mod", ModuloEvaluator.class);
+    values = new HashMap<String,Object>();
+  }
+    
+  @Test
+  public void modTwoFieldsWithValues() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("mod(a,b)");
+    Object result;
+    
+    values.clear();
+    values.put("a", 1);
+    values.put("b", 2);
+    result = evaluator.evaluate(new Tuple(values));
+    Assert.assertTrue(result instanceof Long);
+    Assert.assertEquals(Long.valueOf(1 % 2), result);
+    
+    values.clear();
+    values.put("a", 1.1);
+    values.put("b", 2);
+    result = evaluator.evaluate(new Tuple(values));
+    Assert.assertTrue(result instanceof Double);
+    Assert.assertEquals(1.1 % 2, result);
+    
+    values.clear();
+    values.put("a", 1.1);
+    values.put("b", 2.1);
+    result = evaluator.evaluate(new Tuple(values));
+    Assert.assertTrue(result instanceof Double);
+    Assert.assertEquals(1.1 % 2.1, result);
+  }
+
+  @Test(expected = IOException.class)
+  public void modOneField() throws Exception{
+    factory.constructEvaluator("mod(a)");
+  }
+  
+  @Test(expected = IOException.class)
+  public void modTwoFieldWithNulls() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("mod(a,b)");
+    
+    values.clear();
+    evaluator.evaluate(new Tuple(values));
+  }
+  
+  @Test(expected = IOException.class)
+  public void modTwoFieldsWithNullDenominator() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("mod(a,b)");
+    
+    values.clear();
+    values.put("a", 1);
+    evaluator.evaluate(new Tuple(values));
+  }
+
+  @Test(expected = IOException.class)
+  public void modTwoFieldsWithNullNumerator() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("mod(a,b)");
+    
+    values.clear();
+    values.put("b", 1);
+    evaluator.evaluate(new Tuple(values));
+  }
+
+
+  @Test(expected = IOException.class)
+  public void modTwoFieldsWithMissingDenominator() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("mod(a,b)");
+    
+    values.clear();
+    values.put("a", 1);
+    evaluator.evaluate(new Tuple(values));
+  }
+
+  @Test(expected = IOException.class)
+  public void modTwoFieldsWithMissingNumerator() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("mod(a,b)");
+    
+    values.clear();
+    values.put("b", 1);
+    evaluator.evaluate(new Tuple(values));
+  }
+
+  
+  @Test(expected = IOException.class)
+  public void modManyFieldsWithValues() throws Exception{
+    factory.constructEvaluator("mod(a,b,c,d)");
+  }
+  
+  @Test
+  public void modManyFieldsWithSubmods() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("mod(a,mod(b,c))");
+    Object result;
+    
+    values.clear();
+    values.put("a", 1);
+    values.put("b", 2);
+    values.put("c", 9);
+    result = evaluator.evaluate(new Tuple(values));
+    Assert.assertTrue(result instanceof Long);
+    Assert.assertEquals(Long.valueOf(1 % (2 % 9)), result);
+  }
+  
+  @Test(expected = IOException.class)
+  public void modByZero() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("mod(a,b)");
+    
+    values.clear();
+    values.put("a", 1);
+    values.put("b", 0);
+    evaluator.evaluate(new Tuple(values));
+  }
+  
+  @Test
+  public void modZeroByValue() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("mod(a,b)");
+    Object result;
+    
+    values.clear();
+    values.put("a", 0);
+    values.put("b", 2);
+    result = evaluator.evaluate(new Tuple(values));
+    Assert.assertTrue(result instanceof Long);
+    Assert.assertEquals(0L, result);
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/PowerEvaluatorTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/PowerEvaluatorTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/PowerEvaluatorTest.java
new file mode 100644
index 0000000..5efa7a4
--- /dev/null
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/PowerEvaluatorTest.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.io.stream.eval;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.eval.PowerEvaluator;
+import org.apache.solr.client.solrj.io.eval.StreamEvaluator;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+import org.junit.Test;
+
+import junit.framework.Assert;
+
+public class PowerEvaluatorTest extends LuceneTestCase {
+
+  StreamFactory factory;
+  Map<String, Object> values;
+  
+  public PowerEvaluatorTest() {
+    super();
+    
+    factory = new StreamFactory()
+      .withFunctionName("pow", PowerEvaluator.class);
+    values = new HashMap<String,Object>();
+  }
+    
+  @Test
+  public void powTwoFieldsWithValues() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("pow(a,b)");
+    Object result;
+    
+    values.clear();
+    values.put("a", 2);
+    values.put("b", 5);
+    result = evaluator.evaluate(new Tuple(values));
+    Assert.assertTrue(result instanceof Number);
+    Assert.assertEquals(BigDecimal.valueOf(Math.pow(2, 5)), BigDecimal.valueOf(result instanceof Long ? (long)result : (double)result));
+    
+    values.clear();
+    values.put("a", 1.1);
+    values.put("b", 2);
+    result = evaluator.evaluate(new Tuple(values));
+    Assert.assertTrue(result instanceof Number);
+    Assert.assertEquals(Math.pow(1.1, 2), result);
+    
+    values.clear();
+    values.put("a", 1.1);
+    values.put("b", 2.1);
+    result = evaluator.evaluate(new Tuple(values));
+    Assert.assertTrue(result instanceof Number);
+    Assert.assertEquals(Math.pow(1.1, 2.1), result);
+    
+    values.clear();
+    values.put("a", -1.1);
+    values.put("b", 2.1);
+    result = evaluator.evaluate(new Tuple(values));
+    Assert.assertTrue(Double.isNaN((double)result));
+    
+    values.clear();
+    values.put("a", 1.1);
+    values.put("b", -2.1);
+    result = evaluator.evaluate(new Tuple(values));
+    Assert.assertTrue(result instanceof Number);
+    Assert.assertEquals(Math.pow(1.1, -2.1), result);
+    
+    values.clear();
+    values.put("a", -1.1);
+    values.put("b", -2.1);
+    result = evaluator.evaluate(new Tuple(values));
+    Assert.assertTrue(Double.isNaN((double)result));
+  }
+
+  @Test(expected = IOException.class)
+  public void powOneField() throws Exception{
+    factory.constructEvaluator("pow(a)");
+  }
+  
+  @Test
+  public void powTwoFieldWithNulls() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("pow(a,b)");
+    
+    values.clear();
+    Assert.assertNull(evaluator.evaluate(new Tuple(values)));
+  }
+    
+  @Test
+  public void powManyFieldsWithSubpows() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("pow(a,pow(b,c))");
+    Object result;
+    
+    values.clear();
+    values.put("a", 8);
+    values.put("b", 2);
+    values.put("c", 3);
+    result = evaluator.evaluate(new Tuple(values));
+    Assert.assertTrue(result instanceof Number);
+    Assert.assertEquals(BigDecimal.valueOf(Math.pow(8, Math.pow(2, 3))), BigDecimal.valueOf(result instanceof Long ? (long)result : (double)result));
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/RoundEvaluatorTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/RoundEvaluatorTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/RoundEvaluatorTest.java
new file mode 100644
index 0000000..8851b3c
--- /dev/null
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/RoundEvaluatorTest.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.io.stream.eval;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.eval.RoundEvaluator;
+import org.apache.solr.client.solrj.io.eval.StreamEvaluator;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+import org.junit.Test;
+
+import junit.framework.Assert;
+
+public class RoundEvaluatorTest extends LuceneTestCase {
+
+  StreamFactory factory;
+  Map<String, Object> values;
+  
+  public RoundEvaluatorTest() {
+    super();
+    
+    factory = new StreamFactory()
+      .withFunctionName("round", RoundEvaluator.class);
+    values = new HashMap<String,Object>();
+  }
+  
+  private void test(Double value) throws IOException{
+    StreamEvaluator evaluator = factory.constructEvaluator("round(a)");
+    
+    values.clear();
+    values.put("a", value);
+    Object result = evaluator.evaluate(new Tuple(values));
+    
+    if(null == value){
+      Assert.assertNull(result);
+    }
+    else{
+      Assert.assertTrue(result instanceof Long);
+      Assert.assertEquals(Math.round(value), result);
+    }
+  }
+    
+  @Test
+  public void oneField() throws Exception{
+    test(90D);
+    test(45.555555D);
+    test(12.4D);
+    test(-.4D);
+    test(-0D);
+    test(-0.0235D);
+    test(-12.44444446D);
+    test(-45.23D);
+  }
+
+  @Test(expected = IOException.class)
+  public void noField() throws Exception{
+    factory.constructEvaluator("round()");
+  }
+  
+  @Test(expected = IOException.class)
+  public void twoFields() throws Exception{
+    factory.constructEvaluator("round(a,b)");
+  }
+  
+  @Test
+  public void noValue() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("round(a)");
+    
+    values.clear();
+    Object result = evaluator.evaluate(new Tuple(values));
+    assertNull(result);
+  }
+  @Test
+  public void nullValue() throws Exception{
+    test(null);
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/SineEvaluatorTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/SineEvaluatorTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/SineEvaluatorTest.java
new file mode 100644
index 0000000..8f8a9eb
--- /dev/null
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/SineEvaluatorTest.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.io.stream.eval;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.eval.SineEvaluator;
+import org.apache.solr.client.solrj.io.eval.StreamEvaluator;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+import org.junit.Test;
+
+import junit.framework.Assert;
+
+public class SineEvaluatorTest extends LuceneTestCase {
+
+  StreamFactory factory;
+  Map<String, Object> values;
+  
+  public SineEvaluatorTest() {
+    super();
+    
+    factory = new StreamFactory()
+      .withFunctionName("sin", SineEvaluator.class);
+    values = new HashMap<String,Object>();
+  }
+  
+  private void test(Double value) throws IOException{
+    StreamEvaluator evaluator = factory.constructEvaluator("sin(a)");
+    
+    values.clear();
+    values.put("a", value);
+    Object result = evaluator.evaluate(new Tuple(values));
+    
+    if(null == value){
+      Assert.assertNull(result);
+    }
+    else{
+      Assert.assertTrue(result instanceof Double);
+      Assert.assertEquals(Math.sin(value), result);
+    }
+  }
+    
+  @Test
+  public void oneField() throws Exception{
+    test(90D);
+    test(45D);
+    test(12.4D);
+    test(-45D);
+  }
+
+  @Test(expected = IOException.class)
+  public void noField() throws Exception{
+    factory.constructEvaluator("sin()");
+  }
+  
+  @Test(expected = IOException.class)
+  public void twoFields() throws Exception{
+    factory.constructEvaluator("sin(a,b)");
+  }
+  
+  @Test
+  public void noValue() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("sin(a)");
+    
+    values.clear();
+    Object result = evaluator.evaluate(new Tuple(values));
+    assertNull(result);
+  }
+  @Test
+  public void nullValue() throws Exception{
+    test(null);
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/SquareRootEvaluatorTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/SquareRootEvaluatorTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/SquareRootEvaluatorTest.java
new file mode 100644
index 0000000..733a8f7
--- /dev/null
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/SquareRootEvaluatorTest.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.io.stream.eval;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.eval.SquareRootEvaluator;
+import org.apache.solr.client.solrj.io.eval.StreamEvaluator;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+import org.junit.Test;
+
+import junit.framework.Assert;
+
+public class SquareRootEvaluatorTest extends LuceneTestCase {
+
+  StreamFactory factory;
+  Map<String, Object> values;
+  
+  public SquareRootEvaluatorTest() {
+    super();
+    
+    factory = new StreamFactory()
+      .withFunctionName("sqrt", SquareRootEvaluator.class);
+    values = new HashMap<String,Object>();
+  }
+  
+  private void test(Double value) throws IOException{
+    StreamEvaluator evaluator = factory.constructEvaluator("sqrt(a)");
+    
+    values.clear();
+    values.put("a", value);
+    Object result = evaluator.evaluate(new Tuple(values));
+    
+    if(null == value){
+      Assert.assertNull(result);
+    }
+    else{
+      Assert.assertTrue(result instanceof Double);
+      Assert.assertEquals(Math.sqrt(value), result);
+    }
+  }
+    
+  @Test
+  public void oneField() throws Exception{
+    test(90D);
+    test(45D);
+    test(12.4D);
+    test(-45D);
+  }
+
+  @Test(expected = IOException.class)
+  public void noField() throws Exception{
+    factory.constructEvaluator("sqrt()");
+  }
+  
+  @Test(expected = IOException.class)
+  public void twoFields() throws Exception{
+    factory.constructEvaluator("sqrt(a,b)");
+  }
+  
+  @Test
+  public void noValue() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("sqrt(a)");
+    
+    values.clear();
+    Object result = evaluator.evaluate(new Tuple(values));
+    assertNull(result);
+  }
+  @Test
+  public void nullValue() throws Exception{
+    test(null);
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/TangentEvaluatorTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/TangentEvaluatorTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/TangentEvaluatorTest.java
new file mode 100644
index 0000000..ab0fbef
--- /dev/null
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/TangentEvaluatorTest.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.io.stream.eval;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.eval.StreamEvaluator;
+import org.apache.solr.client.solrj.io.eval.TangentEvaluator;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+import org.junit.Test;
+
+import junit.framework.Assert;
+
+public class TangentEvaluatorTest extends LuceneTestCase {
+
+  StreamFactory factory;
+  Map<String, Object> values;
+  
+  public TangentEvaluatorTest() {
+    super();
+    
+    factory = new StreamFactory()
+      .withFunctionName("tan", TangentEvaluator.class);
+    values = new HashMap<String,Object>();
+  }
+  
+  private void test(Double value) throws IOException{
+    StreamEvaluator evaluator = factory.constructEvaluator("tan(a)");
+    
+    values.clear();
+    values.put("a", value);
+    Object result = evaluator.evaluate(new Tuple(values));
+    
+    if(null == value){
+      Assert.assertNull(result);
+    }
+    else{
+      Assert.assertTrue(result instanceof Double);
+      Assert.assertEquals(Math.tan(value), result);
+    }
+  }
+    
+  @Test
+  public void oneField() throws Exception{
+    test(90D);
+    test(45D);
+    test(12.4D);
+    test(-45D);
+  }
+
+  @Test(expected = IOException.class)
+  public void noField() throws Exception{
+    factory.constructEvaluator("tan()");
+  }
+  
+  @Test(expected = IOException.class)
+  public void twoFields() throws Exception{
+    factory.constructEvaluator("tan(a,b)");
+  }
+  
+  @Test
+  public void noValue() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("tan(a)");
+    
+    values.clear();
+    Object result = evaluator.evaluate(new Tuple(values));
+    assertNull(result);
+  }
+  @Test
+  public void nullValue() throws Exception{
+    test(null);
+  }
+}


[14/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-6736: Adding support for uploading zipped configsets using ConfigSets API

Posted by ab...@apache.org.
SOLR-6736: Adding support for uploading zipped configsets using ConfigSets API


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/6b0217b7
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/6b0217b7
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/6b0217b7

Branch: refs/heads/jira/solr-9959
Commit: 6b0217b7cbff1216bb4ffbecdba02eb8c5dd3df6
Parents: 0322068
Author: Ishan Chattopadhyaya <is...@apache.org>
Authored: Wed Mar 29 00:44:27 2017 +0530
Committer: Ishan Chattopadhyaya <is...@apache.org>
Committed: Wed Mar 29 00:44:27 2017 +0530

----------------------------------------------------------------------
 solr/CHANGES.txt                                |   3 +
 .../dataimport/EntityProcessorWrapper.java      |  23 ++
 .../apache/solr/cloud/ZkSolrResourceLoader.java |   2 +-
 .../java/org/apache/solr/core/ConfigSet.java    |  10 +-
 .../org/apache/solr/core/ConfigSetService.java  |  29 +-
 .../org/apache/solr/core/CoreContainer.java     |   3 +-
 .../org/apache/solr/core/CoreDescriptor.java    |  17 +
 .../apache/solr/core/RunExecutableListener.java |  13 +-
 .../src/java/org/apache/solr/core/SolrCore.java |   1 +
 .../solr/handler/admin/ConfigSetsHandler.java   |  93 ++++-
 .../apache/solr/handler/loader/XMLLoader.java   |   7 +
 .../solr/security/PermissionNameProvider.java   |   2 +-
 .../org/apache/solr/update/UpdateHandler.java   |  16 +
 .../StatelessScriptUpdateProcessorFactory.java  |   6 +
 .../dih-script-transformer/managed-schema       |  25 ++
 .../dih-script-transformer/solrconfig.xml       |  61 +++
 .../configsets/upload/regular/managed-schema    |  25 ++
 .../configsets/upload/regular/solrconfig.xml    |  61 +++
 .../regular/xslt/xsl-update-handler-test.xsl    |  49 +++
 .../with-run-executable-listener/managed-schema |  25 ++
 .../with-run-executable-listener/solrconfig.xml |  69 ++++
 .../upload/with-script-processor/managed-schema |  25 ++
 ...missleading.extension.updateprocessor.js.txt |  23 ++
 .../upload/with-script-processor/solrconfig.xml |  65 +++
 .../apache/solr/cloud/TestConfigSetsAPI.java    | 401 ++++++++++++++++++-
 .../org/apache/solr/core/TestCodecSupport.java  |   2 +-
 .../apache/solr/core/TestDynamicLoading.java    |  11 +-
 .../solr/common/params/ConfigSetParams.java     |   1 +
 .../apache/solr/cloud/MiniSolrCloudCluster.java |   6 +-
 29 files changed, 1056 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6b0217b7/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 8875160..49300fe 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -123,6 +123,9 @@ New Features
 
 * SOLR-10076: Hide keystore and truststore passwords from /admin/info/* outputs. (Mano Kovacs via Mark Miller)
 
+* SOLR-6736: Adding support for uploading zipped configsets using ConfigSets API (Varun Rajput, Ishan Chattopadhyaya,
+  Noble Paul, Anshum Gupta, Gregory Chanan)
+
 Optimizations
 ----------------------
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6b0217b7/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessorWrapper.java
----------------------------------------------------------------------
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessorWrapper.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessorWrapper.java
index 6370d24..8a76e11 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessorWrapper.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessorWrapper.java
@@ -17,6 +17,8 @@
 package org.apache.solr.handler.dataimport;
 
 import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.core.SolrCore;
 import org.apache.solr.handler.dataimport.config.ConfigNameConstants;
 import org.apache.solr.handler.dataimport.config.Entity;
 
@@ -98,6 +100,9 @@ public class EntityProcessorWrapper extends EntityProcessor {
     for (String aTransArr : transArr) {
       String trans = aTransArr.trim();
       if (trans.startsWith("script:")) {
+        // The script transformer is a potential vulnerability, esp. when the script is
+        // provided from an untrusted source. Check and don't proceed if source is untrusted.
+        checkIfTrusted(trans);
         String functionName = trans.substring("script:".length());
         ScriptTransformer scriptTransformer = new ScriptTransformer();
         scriptTransformer.setFunctionName(functionName);
@@ -126,6 +131,24 @@ public class EntityProcessorWrapper extends EntityProcessor {
 
   }
 
+  private void checkIfTrusted(String trans) {
+    if (docBuilder != null) {
+      SolrCore core = docBuilder.dataImporter.getCore();
+      boolean trusted = (core != null)? core.getCoreDescriptor().isConfigSetTrusted(): true;
+      if (!trusted) {
+        Exception ex = new SolrException(ErrorCode.UNAUTHORIZED, "The configset for this collection was uploaded "
+            + "without any authentication in place,"
+            + " and this transformer is not available for collections with untrusted configsets. To use this transformer,"
+            + " re-upload the configset after enabling authentication and authorization.");
+        String msg = "Transformer: "
+            + trans
+            + ". " + ex.getMessage();
+        log.error(msg);
+        wrapAndThrow(SEVERE, ex, msg);
+      }
+    }
+  }
+
   @SuppressWarnings("unchecked")
   static class ReflectionTransformer extends Transformer {
     final Method meth;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6b0217b7/solr/core/src/java/org/apache/solr/cloud/ZkSolrResourceLoader.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ZkSolrResourceLoader.java b/solr/core/src/java/org/apache/solr/cloud/ZkSolrResourceLoader.java
index b4137b3..5f32ef2 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ZkSolrResourceLoader.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ZkSolrResourceLoader.java
@@ -81,7 +81,7 @@ public class ZkSolrResourceLoader extends SolrResourceLoader {
   @Override
   public InputStream openResource(String resource) throws IOException {
     InputStream is;
-    String file = configSetZkPath + "/" + resource;
+    String file = (".".equals(resource)) ? configSetZkPath : configSetZkPath + "/" + resource;
     int maxTries = 10;
     Exception exception = null;
     while (maxTries -- > 0) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6b0217b7/solr/core/src/java/org/apache/solr/core/ConfigSet.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/ConfigSet.java b/solr/core/src/java/org/apache/solr/core/ConfigSet.java
index c406506..e0c9fe4 100644
--- a/solr/core/src/java/org/apache/solr/core/ConfigSet.java
+++ b/solr/core/src/java/org/apache/solr/core/ConfigSet.java
@@ -32,11 +32,15 @@ public class ConfigSet {
 
   private final NamedList properties;
 
-  public ConfigSet(String name, SolrConfig solrConfig, IndexSchema indexSchema, NamedList properties) {
+  private final boolean trusted;
+
+  public ConfigSet(String name, SolrConfig solrConfig, IndexSchema indexSchema,
+      NamedList properties, boolean trusted) {
     this.name = name;
     this.solrconfig = solrConfig;
     this.indexSchema = indexSchema;
     this.properties = properties;
+    this.trusted = trusted;
   }
 
   public String getName() {
@@ -54,4 +58,8 @@ public class ConfigSet {
   public NamedList getProperties() {
     return properties;
   }
+  
+  public boolean isTrusted() {
+    return trusted;
+  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6b0217b7/solr/core/src/java/org/apache/solr/core/ConfigSetService.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/ConfigSetService.java b/solr/core/src/java/org/apache/solr/core/ConfigSetService.java
index e4a135e..13ac9ce 100644
--- a/solr/core/src/java/org/apache/solr/core/ConfigSetService.java
+++ b/solr/core/src/java/org/apache/solr/core/ConfigSetService.java
@@ -28,6 +28,7 @@ import com.google.common.cache.Cache;
 import com.google.common.cache.CacheBuilder;
 import org.apache.solr.cloud.CloudConfigSetService;
 import org.apache.solr.cloud.ZkController;
+import org.apache.solr.cloud.ZkSolrResourceLoader;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.schema.IndexSchema;
@@ -74,10 +75,22 @@ public abstract class ConfigSetService {
     SolrResourceLoader coreLoader = createCoreResourceLoader(dcore);
 
     try {
+
+      // ConfigSet properties are loaded from ConfigSetProperties.DEFAULT_FILENAME file.
+      // ConfigSet flags are loaded from the metadata of the ZK node of the configset.
+      NamedList properties = createConfigSetProperties(dcore, coreLoader);
+      NamedList flags = getConfigSetFlags(dcore, coreLoader);
+
+      boolean trusted =
+          (coreLoader instanceof ZkSolrResourceLoader
+              && flags != null
+              && flags.get("trusted") != null
+              && !flags.getBooleanArg("trusted")
+              ) ? false: true;
+
       SolrConfig solrConfig = createSolrConfig(dcore, coreLoader);
       IndexSchema schema = createIndexSchema(dcore, solrConfig);
-      NamedList properties = createConfigSetProperties(dcore, coreLoader);
-      return new ConfigSet(configName(dcore), solrConfig, schema, properties);
+      return new ConfigSet(configName(dcore), solrConfig, schema, properties, trusted);
     } catch (Exception e) {
       throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
           "Could not load conf for core " + dcore.getName() +
@@ -116,6 +129,18 @@ public abstract class ConfigSetService {
     return ConfigSetProperties.readFromResourceLoader(loader, cd.getConfigSetPropertiesName());
   }
 
+  protected NamedList getConfigSetFlags(CoreDescriptor cd, SolrResourceLoader loader) {
+    if (loader instanceof ZkSolrResourceLoader) {
+      try {
+        return ConfigSetProperties.readFromResourceLoader(loader, ".");
+      } catch (Exception ex) {
+        return null;
+      }
+    } else {
+      return null;
+    }
+  }
+
   /**
    * Create a SolrResourceLoader for a core
    * @param cd the core's CoreDescriptor

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6b0217b7/solr/core/src/java/org/apache/solr/core/CoreContainer.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/CoreContainer.java b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
index 9db3261..5ec34ba 100644
--- a/solr/core/src/java/org/apache/solr/core/CoreContainer.java
+++ b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
@@ -945,7 +945,8 @@ public class CoreContainer {
       }
 
       ConfigSet coreConfig = coreConfigService.getConfig(dcore);
-      log.info("Creating SolrCore '{}' using configuration from {}", dcore.getName(), coreConfig.getName());
+      dcore.setConfigSetTrusted(coreConfig.isTrusted());
+      log.info("Creating SolrCore '{}' using configuration from {}, trusted={}", dcore.getName(), coreConfig.getName(), dcore.isConfigSetTrusted());
       try {
         core = new SolrCore(dcore, coreConfig);
       } catch (SolrException e) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6b0217b7/solr/core/src/java/org/apache/solr/core/CoreDescriptor.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/CoreDescriptor.java b/solr/core/src/java/org/apache/solr/core/CoreDescriptor.java
index a58de48..0dc2c71 100644
--- a/solr/core/src/java/org/apache/solr/core/CoreDescriptor.java
+++ b/solr/core/src/java/org/apache/solr/core/CoreDescriptor.java
@@ -66,6 +66,14 @@ public class CoreDescriptor {
   public static final String DEFAULT_EXTERNAL_PROPERTIES_FILE = "conf" + File.separator + "solrcore.properties";
 
   /**
+   * Whether this core was configured using a configSet that was trusted.
+   * This helps in avoiding the loading of plugins that have potential
+   * vulnerabilities, when the configSet was not uploaded from a trusted
+   * user.
+   */
+  private boolean trustedConfigSet = true;
+
+  /**
    * Get the standard properties in persistable form
    * @return the standard core properties in persistable form
    */
@@ -170,6 +178,7 @@ public class CoreDescriptor {
     this.coreProperties.setProperty(CORE_NAME, coreName);
     this.originalCoreProperties.setProperty(CORE_NAME, coreName);
     this.substitutableProperties.setProperty(SOLR_CORE_PROP_PREFIX + CORE_NAME, coreName);
+    this.trustedConfigSet = other.trustedConfigSet;
   }
 
   /**
@@ -372,4 +381,12 @@ public class CoreDescriptor {
   public String getConfigSetPropertiesName() {
     return coreProperties.getProperty(CORE_CONFIGSET_PROPERTIES);
   }
+
+  public boolean isConfigSetTrusted() {
+    return trustedConfigSet;
+  }
+
+  public void setConfigSetTrusted(boolean trusted) {
+    this.trustedConfigSet = trusted;
+  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6b0217b7/solr/core/src/java/org/apache/solr/core/RunExecutableListener.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/RunExecutableListener.java b/solr/core/src/java/org/apache/solr/core/RunExecutableListener.java
index ba0046e..c6d0090 100644
--- a/solr/core/src/java/org/apache/solr/core/RunExecutableListener.java
+++ b/solr/core/src/java/org/apache/solr/core/RunExecutableListener.java
@@ -17,8 +17,10 @@
 package org.apache.solr.core;
 
 import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.search.SolrIndexSearcher;
+import org.apache.solr.util.plugin.SolrCoreAware;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -30,7 +32,7 @@ import java.util.ArrayList;
 
 /**
  */
-class RunExecutableListener extends AbstractSolrEventListener {
+class RunExecutableListener extends AbstractSolrEventListener implements SolrCoreAware {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
   
   public RunExecutableListener(SolrCore core) {
@@ -66,6 +68,15 @@ class RunExecutableListener extends AbstractSolrEventListener {
     if ("false".equals(args.get("wait")) || Boolean.FALSE.equals(args.get("wait"))) wait=false;
   }
 
+  @Override
+  public void inform(SolrCore core) {
+    if (!core.getCoreDescriptor().isConfigSetTrusted()) {
+      throw new SolrException(ErrorCode.UNAUTHORIZED, "The configset for this collection was uploaded without any authentication in place,"
+          + " and this operation is not available for collections with untrusted configsets. To have this component, re-upload the configset"
+          + " after enabling authentication and authorization.");
+    }
+  }
+
   /**
    * External executable listener.
    * 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6b0217b7/solr/core/src/java/org/apache/solr/core/SolrCore.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/SolrCore.java b/solr/core/src/java/org/apache/solr/core/SolrCore.java
index 9d77c7e..a6ba2dc 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrCore.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrCore.java
@@ -962,6 +962,7 @@ public final class SolrCore implements SolrInfoMBean, SolrMetricProducer, Closea
       // Finally tell anyone who wants to know
       resourceLoader.inform(resourceLoader);
       resourceLoader.inform(this); // last call before the latch is released.
+      this.updateHandler.informEventListeners(this);
     } catch (Throwable e) {
       // release the latch, otherwise we block trying to do the close. This
       // should be fine, since counting down on a latch of 0 is still fine

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6b0217b7/solr/core/src/java/org/apache/solr/handler/admin/ConfigSetsHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/ConfigSetsHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/ConfigSetsHandler.java
index 5d6f02c..d3489df 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/ConfigSetsHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/ConfigSetsHandler.java
@@ -16,14 +16,20 @@
  */
 package org.apache.solr.handler.admin;
 
+import java.io.InputStream;
 import java.lang.invoke.MethodHandles;
-
+import java.nio.charset.StandardCharsets;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
+import java.util.zip.ZipEntry;
+import java.util.zip.ZipInputStream;
 
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.fs.Path;
 import org.apache.solr.api.Api;
 import org.apache.solr.client.solrj.SolrResponse;
 import org.apache.solr.cloud.OverseerSolrResponse;
@@ -36,6 +42,7 @@ import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.params.ConfigSetParams;
 import org.apache.solr.common.params.ConfigSetParams.ConfigSetAction;
 import org.apache.solr.common.params.SolrParams;
+import org.apache.solr.common.util.ContentStream;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.SimpleOrderedMap;
 import org.apache.solr.common.util.Utils;
@@ -43,6 +50,10 @@ import org.apache.solr.core.CoreContainer;
 import org.apache.solr.handler.RequestHandlerBase;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.response.SolrQueryResponse;
+import org.apache.solr.security.AuthenticationPlugin;
+import org.apache.solr.security.AuthorizationContext;
+import org.apache.solr.security.PermissionNameProvider;
+import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
 import static org.apache.solr.cloud.OverseerConfigSetMessageHandler.BASE_CONFIGSET;
 import static org.apache.solr.cloud.OverseerConfigSetMessageHandler.CONFIGSETS_ACTION_PREFIX;
@@ -59,7 +70,7 @@ import static org.apache.solr.cloud.Overseer.QUEUE_OPERATION;
 /**
  * A {@link org.apache.solr.request.SolrRequestHandler} for ConfigSets API requests.
  */
-public class ConfigSetsHandler extends RequestHandlerBase {
+public class ConfigSetsHandler extends RequestHandlerBase implements PermissionNameProvider {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
   protected final CoreContainer coreContainer;
   public static long DEFAULT_ZK_TIMEOUT = 300*1000;
@@ -95,6 +106,10 @@ public class ConfigSetsHandler extends RequestHandlerBase {
       ConfigSetAction action = ConfigSetAction.get(a);
       if (action == null)
         throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown action: " + a);
+      if (action == ConfigSetAction.UPLOAD) {
+        handleConfigUploadRequest(req, rsp);
+        return;
+      }
       invokeAction(req, rsp, action);
     } else {
       throw new SolrException(ErrorCode.BAD_REQUEST, "action is a required param");
@@ -121,6 +136,68 @@ public class ConfigSetsHandler extends RequestHandlerBase {
     }
   }
 
+  private void handleConfigUploadRequest(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
+    String configSetName = req.getParams().get(NAME);
+    if (StringUtils.isBlank(configSetName)) {
+      throw new SolrException(ErrorCode.BAD_REQUEST,
+          "The configuration name should be provided in the \"name\" parameter");
+    }
+
+    SolrZkClient zkClient = coreContainer.getZkController().getZkClient();
+    String configPathInZk = ZkConfigManager.CONFIGS_ZKNODE + Path.SEPARATOR + configSetName;
+
+    if (zkClient.exists(configPathInZk, true)) {
+      throw new SolrException(ErrorCode.BAD_REQUEST,
+          "The configuration " + configSetName + " already exists in zookeeper");
+    }
+
+    Iterator<ContentStream> contentStreamsIterator = req.getContentStreams().iterator();
+
+    if (!contentStreamsIterator.hasNext()) {
+      throw new SolrException(ErrorCode.BAD_REQUEST,
+          "No stream found for the config data to be uploaded");
+    }
+
+    InputStream inputStream = contentStreamsIterator.next().getStream();
+
+    // Create a node for the configuration in zookeeper
+    boolean trusted = getTrusted(req);
+    zkClient.makePath(configPathInZk, ("{\"trusted\": " + Boolean.toString(trusted) + "}").
+        getBytes(StandardCharsets.UTF_8), true);
+
+    ZipInputStream zis = new ZipInputStream(inputStream, StandardCharsets.UTF_8);
+    ZipEntry zipEntry = null;
+    while ((zipEntry = zis.getNextEntry()) != null) {
+      String filePathInZk = configPathInZk + "/" + zipEntry.getName();
+      if (zipEntry.isDirectory()) {
+        zkClient.makePath(filePathInZk, true);
+      } else {
+        createZkNodeIfNotExistsAndSetData(zkClient, filePathInZk,
+            IOUtils.toByteArray(zis));
+      }
+    }
+    zis.close();
+  }
+
+  boolean getTrusted(SolrQueryRequest req) {
+    AuthenticationPlugin authcPlugin = coreContainer.getAuthenticationPlugin();
+    log.info("Trying to upload a configset. authcPlugin: {}, user principal: {}",
+        authcPlugin, req.getUserPrincipal());
+    if (authcPlugin != null && req.getUserPrincipal() != null) {
+      return true;
+    }
+    return false;
+  }
+
+  private void createZkNodeIfNotExistsAndSetData(SolrZkClient zkClient,
+      String filePathInZk, byte[] data) throws Exception {
+    if (!zkClient.exists(filePathInZk, true)) {
+      zkClient.create(filePathInZk, data, CreateMode.PERSISTENT, true);
+    } else {
+      zkClient.setData(filePathInZk, data, true);
+    }
+  }
+
   private void handleResponse(String operation, ZkNodeProps m,
       SolrQueryResponse rsp, long timeout) throws KeeperException, InterruptedException {
     long time = System.nanoTime();
@@ -226,4 +303,16 @@ public class ConfigSetsHandler extends RequestHandlerBase {
   public Boolean registerV2() {
     return Boolean.TRUE;
   }
+
+  @Override
+  public Name getPermissionName(AuthorizationContext ctx) {
+    switch (ctx.getHttpMethod()) {
+      case "GET":
+        return Name.CONFIG_READ_PERM;
+      case "POST":
+        return Name.CONFIG_EDIT_PERM;
+      default:
+        return null;
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6b0217b7/solr/core/src/java/org/apache/solr/handler/loader/XMLLoader.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/loader/XMLLoader.java b/solr/core/src/java/org/apache/solr/handler/loader/XMLLoader.java
index 000edee..a07aff2 100644
--- a/solr/core/src/java/org/apache/solr/handler/loader/XMLLoader.java
+++ b/solr/core/src/java/org/apache/solr/handler/loader/XMLLoader.java
@@ -42,6 +42,7 @@ import com.google.common.collect.Lists;
 import org.apache.commons.io.IOUtils;
 import org.apache.solr.common.EmptyEntityResolver;
 import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.params.CommonParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
@@ -134,6 +135,12 @@ public class XMLLoader extends ContentStreamLoader {
 
     String tr = req.getParams().get(CommonParams.TR,null);
     if(tr!=null) {
+      if (req.getCore().getCoreDescriptor().isConfigSetTrusted() == false) {
+          throw new SolrException(ErrorCode.UNAUTHORIZED, "The configset for this collection was uploaded without any authentication in place,"
+                  + " and this operation is not available for collections with untrusted configsets. To use this feature, re-upload the configset"
+                  + " after enabling authentication and authorization.");
+      }
+
       final Transformer t = getTransformer(tr,req);
       final DOMResult result = new DOMResult();
       

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6b0217b7/solr/core/src/java/org/apache/solr/security/PermissionNameProvider.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/security/PermissionNameProvider.java b/solr/core/src/java/org/apache/solr/security/PermissionNameProvider.java
index 425be38..9f797487 100644
--- a/solr/core/src/java/org/apache/solr/security/PermissionNameProvider.java
+++ b/solr/core/src/java/org/apache/solr/security/PermissionNameProvider.java
@@ -40,7 +40,7 @@ public interface PermissionNameProvider {
     CORE_EDIT_PERM("core-admin-edit", null),
     READ_PERM("read", "*"),
     UPDATE_PERM("update", "*"),
-    CONFIG_EDIT_PERM("config-edit", "*"),
+    CONFIG_EDIT_PERM("config-edit", unmodifiableSet(new HashSet<>(asList("*", null)))),
     CONFIG_READ_PERM("config-read", "*"),
     SCHEMA_READ_PERM("schema-read", "*"),
     SCHEMA_EDIT_PERM("schema-edit", "*"),

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6b0217b7/solr/core/src/java/org/apache/solr/update/UpdateHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/UpdateHandler.java b/solr/core/src/java/org/apache/solr/update/UpdateHandler.java
index 1cf8a3f..cbfb0d5 100644
--- a/solr/core/src/java/org/apache/solr/update/UpdateHandler.java
+++ b/solr/core/src/java/org/apache/solr/update/UpdateHandler.java
@@ -72,6 +72,22 @@ public abstract class UpdateHandler implements SolrInfoMBean {
     }
   }
 
+  /**
+   * Call the {@link SolrCoreAware#inform(SolrCore)} on all the applicable registered listeners.
+   */
+  public void informEventListeners(SolrCore core) {
+    for (SolrEventListener listener: commitCallbacks) {
+      if (listener instanceof SolrCoreAware) {
+        ((SolrCoreAware) listener).inform(core);
+      }
+    }
+    for (SolrEventListener listener: optimizeCallbacks) {
+      if (listener instanceof SolrCoreAware) {
+        ((SolrCoreAware) listener).inform(core);
+      }
+    }
+  }
+
   protected void callPostCommitCallbacks() {
     for (SolrEventListener listener : commitCallbacks) {
       listener.postCommit();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6b0217b7/solr/core/src/java/org/apache/solr/update/processor/StatelessScriptUpdateProcessorFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/processor/StatelessScriptUpdateProcessorFactory.java b/solr/core/src/java/org/apache/solr/update/processor/StatelessScriptUpdateProcessorFactory.java
index 72da7b5..190501d 100644
--- a/solr/core/src/java/org/apache/solr/update/processor/StatelessScriptUpdateProcessorFactory.java
+++ b/solr/core/src/java/org/apache/solr/update/processor/StatelessScriptUpdateProcessorFactory.java
@@ -17,6 +17,7 @@
 package org.apache.solr.update.processor;
 
 import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.core.SolrCore;
@@ -217,6 +218,11 @@ public class StatelessScriptUpdateProcessorFactory extends UpdateRequestProcesso
 
   @Override
   public void inform(SolrCore core) {
+    if (!core.getCoreDescriptor().isConfigSetTrusted()) {
+      throw new SolrException(ErrorCode.UNAUTHORIZED, "The configset for this collection was uploaded without any authentication in place,"
+          + " and this operation is not available for collections with untrusted configsets. To use this component, re-upload the configset"
+          + " after enabling authentication and authorization.");
+    }
     resourceLoader = core.getResourceLoader();
 
     // test that our engines & scripts are valid

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6b0217b7/solr/core/src/test-files/solr/configsets/upload/dih-script-transformer/managed-schema
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/configsets/upload/dih-script-transformer/managed-schema b/solr/core/src/test-files/solr/configsets/upload/dih-script-transformer/managed-schema
new file mode 100644
index 0000000..9e2f947
--- /dev/null
+++ b/solr/core/src/test-files/solr/configsets/upload/dih-script-transformer/managed-schema
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<schema name="minimal" version="1.1">
+ <types>
+  <fieldType name="string" class="solr.StrField"/>
+ </types>
+ <fields>
+   <dynamicField name="*" type="string" indexed="true" stored="true" />
+ </fields>
+</schema>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6b0217b7/solr/core/src/test-files/solr/configsets/upload/dih-script-transformer/solrconfig.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/configsets/upload/dih-script-transformer/solrconfig.xml b/solr/core/src/test-files/solr/configsets/upload/dih-script-transformer/solrconfig.xml
new file mode 100644
index 0000000..82d0cc9
--- /dev/null
+++ b/solr/core/src/test-files/solr/configsets/upload/dih-script-transformer/solrconfig.xml
@@ -0,0 +1,61 @@
+<?xml version="1.0" ?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- This is a "kitchen sink" config file that tests can use.
+     When writing a new test, feel free to add *new* items (plugins,
+     config options, etc...) as long as they don't break any existing
+     tests.  If you need to test something esoteric please add a new
+     "solrconfig-your-esoteric-purpose.xml" config file.
+
+     Note in particular that this test is used by MinimalSchemaTest so
+     anything added to this file needs to work correctly even if there
+     is no uniqueKey or defaultSearchField.
+  -->
+
+<config>
+
+  <dataDir>${solr.data.dir:}</dataDir>
+
+  <directoryFactory name="DirectoryFactory"
+                    class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}"/>
+
+  <luceneMatchVersion>${tests.luceneMatchVersion:LATEST}</luceneMatchVersion>
+
+  <updateHandler class="solr.DirectUpdateHandler2">
+    <commitWithin>
+      <softCommit>${solr.commitwithin.softcommit:true}</softCommit>
+    </commitWithin>
+
+  </updateHandler>
+  <requestHandler name="/select" class="solr.SearchHandler">
+    <lst name="defaults">
+      <str name="echoParams">explicit</str>
+      <str name="indent">true</str>
+      <str name="df">text</str>
+    </lst>
+
+  </requestHandler>
+
+  <requestHandler name="/update/xslt"
+                   startup="lazy"
+                   class="solr.XsltUpdateRequestHandler"/>
+
+  <requestHandler name="/update" class="solr.UpdateRequestHandler"  />
+</config>
+

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6b0217b7/solr/core/src/test-files/solr/configsets/upload/regular/managed-schema
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/configsets/upload/regular/managed-schema b/solr/core/src/test-files/solr/configsets/upload/regular/managed-schema
new file mode 100644
index 0000000..9e2f947
--- /dev/null
+++ b/solr/core/src/test-files/solr/configsets/upload/regular/managed-schema
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<schema name="minimal" version="1.1">
+ <types>
+  <fieldType name="string" class="solr.StrField"/>
+ </types>
+ <fields>
+   <dynamicField name="*" type="string" indexed="true" stored="true" />
+ </fields>
+</schema>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6b0217b7/solr/core/src/test-files/solr/configsets/upload/regular/solrconfig.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/configsets/upload/regular/solrconfig.xml b/solr/core/src/test-files/solr/configsets/upload/regular/solrconfig.xml
new file mode 100644
index 0000000..82d0cc9
--- /dev/null
+++ b/solr/core/src/test-files/solr/configsets/upload/regular/solrconfig.xml
@@ -0,0 +1,61 @@
+<?xml version="1.0" ?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- This is a "kitchen sink" config file that tests can use.
+     When writing a new test, feel free to add *new* items (plugins,
+     config options, etc...) as long as they don't break any existing
+     tests.  If you need to test something esoteric please add a new
+     "solrconfig-your-esoteric-purpose.xml" config file.
+
+     Note in particular that this test is used by MinimalSchemaTest so
+     anything added to this file needs to work correctly even if there
+     is no uniqueKey or defaultSearchField.
+  -->
+
+<config>
+
+  <dataDir>${solr.data.dir:}</dataDir>
+
+  <directoryFactory name="DirectoryFactory"
+                    class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}"/>
+
+  <luceneMatchVersion>${tests.luceneMatchVersion:LATEST}</luceneMatchVersion>
+
+  <updateHandler class="solr.DirectUpdateHandler2">
+    <commitWithin>
+      <softCommit>${solr.commitwithin.softcommit:true}</softCommit>
+    </commitWithin>
+
+  </updateHandler>
+  <requestHandler name="/select" class="solr.SearchHandler">
+    <lst name="defaults">
+      <str name="echoParams">explicit</str>
+      <str name="indent">true</str>
+      <str name="df">text</str>
+    </lst>
+
+  </requestHandler>
+
+  <requestHandler name="/update/xslt"
+                   startup="lazy"
+                   class="solr.XsltUpdateRequestHandler"/>
+
+  <requestHandler name="/update" class="solr.UpdateRequestHandler"  />
+</config>
+

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6b0217b7/solr/core/src/test-files/solr/configsets/upload/regular/xslt/xsl-update-handler-test.xsl
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/configsets/upload/regular/xslt/xsl-update-handler-test.xsl b/solr/core/src/test-files/solr/configsets/upload/regular/xslt/xsl-update-handler-test.xsl
new file mode 100644
index 0000000..2e7359a
--- /dev/null
+++ b/solr/core/src/test-files/solr/configsets/upload/regular/xslt/xsl-update-handler-test.xsl
@@ -0,0 +1,49 @@
+<?xml version='1.0' encoding='UTF-8'?>
+
+<!-- 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ -->
+
+<!-- 
+
+
+XSL transform used to test the XSLTUpdateRequestHandler.
+Transforms a test XML into standard Solr <add><doc/></add> format.
+
+ -->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+  <xsl:template match="/">
+    <add>
+      <xsl:apply-templates select="/random/document"/>
+    </add>
+  </xsl:template>
+
+  <xsl:template match="document">
+    <doc boost="5.5">
+      <xsl:apply-templates select="*"/>
+    </doc>
+  </xsl:template>
+
+  <xsl:template match="node">
+    <field name="{@name}">
+      <xsl:if test="@enhance!=''">
+        <xsl:attribute name="boost"><xsl:value-of select="@enhance"/></xsl:attribute>
+      </xsl:if>
+      <xsl:value-of select="@value"/>
+    </field>
+  </xsl:template>
+
+</xsl:stylesheet>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6b0217b7/solr/core/src/test-files/solr/configsets/upload/with-run-executable-listener/managed-schema
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/configsets/upload/with-run-executable-listener/managed-schema b/solr/core/src/test-files/solr/configsets/upload/with-run-executable-listener/managed-schema
new file mode 100644
index 0000000..9e2f947
--- /dev/null
+++ b/solr/core/src/test-files/solr/configsets/upload/with-run-executable-listener/managed-schema
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<schema name="minimal" version="1.1">
+ <types>
+  <fieldType name="string" class="solr.StrField"/>
+ </types>
+ <fields>
+   <dynamicField name="*" type="string" indexed="true" stored="true" />
+ </fields>
+</schema>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6b0217b7/solr/core/src/test-files/solr/configsets/upload/with-run-executable-listener/solrconfig.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/configsets/upload/with-run-executable-listener/solrconfig.xml b/solr/core/src/test-files/solr/configsets/upload/with-run-executable-listener/solrconfig.xml
new file mode 100644
index 0000000..4683841
--- /dev/null
+++ b/solr/core/src/test-files/solr/configsets/upload/with-run-executable-listener/solrconfig.xml
@@ -0,0 +1,69 @@
+<?xml version="1.0" ?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- This is a "kitchen sink" config file that tests can use.
+     When writing a new test, feel free to add *new* items (plugins,
+     config options, etc...) as long as they don't break any existing
+     tests.  If you need to test something esoteric please add a new
+     "solrconfig-your-esoteric-purpose.xml" config file.
+
+     Note in particular that this test is used by MinimalSchemaTest so
+     anything added to this file needs to work correctly even if there
+     is no uniqueKey or defaultSearchField.
+  -->
+
+<config>
+
+  <dataDir>${solr.data.dir:}</dataDir>
+
+  <directoryFactory name="DirectoryFactory"
+                    class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}"/>
+
+  <luceneMatchVersion>${tests.luceneMatchVersion:LATEST}</luceneMatchVersion>
+
+  <updateHandler class="solr.DirectUpdateHandler2">
+    <commitWithin>
+      <softCommit>${solr.commitwithin.softcommit:true}</softCommit>
+    </commitWithin>
+
+    <listener event="postCommit" class="solr.RunExecutableListener">
+      <str name="exe">/var/opt/resin3/__PORT__/scripts/solr/snapshooter</str>
+      <str name="dir">/var/opt/resin3/__PORT__</str>
+      <bool name="wait">true</bool>
+      <arr name="args"> <str>arg1</str> <str>arg2</str> </arr>
+      <arr name="env"> <str>MYVAR=val1</str> </arr>
+    </listener>
+
+  </updateHandler>
+  <requestHandler name="/select" class="solr.SearchHandler">
+    <lst name="defaults">
+      <str name="echoParams">explicit</str>
+      <str name="indent">true</str>
+      <str name="df">text</str>
+    </lst>
+
+  </requestHandler>
+
+  <requestHandler name="/update/xslt"
+                   startup="lazy"
+                   class="solr.XsltUpdateRequestHandler"/>
+
+  <requestHandler name="/update" class="solr.UpdateRequestHandler"  />
+</config>
+

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6b0217b7/solr/core/src/test-files/solr/configsets/upload/with-script-processor/managed-schema
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/configsets/upload/with-script-processor/managed-schema b/solr/core/src/test-files/solr/configsets/upload/with-script-processor/managed-schema
new file mode 100644
index 0000000..9e2f947
--- /dev/null
+++ b/solr/core/src/test-files/solr/configsets/upload/with-script-processor/managed-schema
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<schema name="minimal" version="1.1">
+ <types>
+  <fieldType name="string" class="solr.StrField"/>
+ </types>
+ <fields>
+   <dynamicField name="*" type="string" indexed="true" stored="true" />
+ </fields>
+</schema>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6b0217b7/solr/core/src/test-files/solr/configsets/upload/with-script-processor/missleading.extension.updateprocessor.js.txt
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/configsets/upload/with-script-processor/missleading.extension.updateprocessor.js.txt b/solr/core/src/test-files/solr/configsets/upload/with-script-processor/missleading.extension.updateprocessor.js.txt
new file mode 100644
index 0000000..984e1d8
--- /dev/null
+++ b/solr/core/src/test-files/solr/configsets/upload/with-script-processor/missleading.extension.updateprocessor.js.txt
@@ -0,0 +1,23 @@
+function processAdd(cmd) {
+    // Integer.valueOf is needed here to get a true java object, because 
+    // all javascript numbers are floating point (ie: java.lang.Double)
+    cmd.getSolrInputDocument().addField("script_added_i", 
+                                        java.lang.Integer.valueOf(42));
+    cmd.getSolrInputDocument().addField("script_added_d", 42.3);
+    
+}
+function processDelete() {
+    // NOOP
+}
+function processCommit() { 
+    // NOOP
+}
+function processRollback() {
+    // NOOP
+}
+function processMergeIndexes() {
+    // NOOP
+}
+function finish() { 
+    // NOOP
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6b0217b7/solr/core/src/test-files/solr/configsets/upload/with-script-processor/solrconfig.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/configsets/upload/with-script-processor/solrconfig.xml b/solr/core/src/test-files/solr/configsets/upload/with-script-processor/solrconfig.xml
new file mode 100644
index 0000000..1c62889
--- /dev/null
+++ b/solr/core/src/test-files/solr/configsets/upload/with-script-processor/solrconfig.xml
@@ -0,0 +1,65 @@
+<?xml version="1.0" ?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- This is a "kitchen sink" config file that tests can use.
+     When writing a new test, feel free to add *new* items (plugins,
+     config options, etc...) as long as they don't break any existing
+     tests.  if you need to test something esoteric please add a new
+     "solrconfig-your-esoteric-purpose.xml" config file.
+
+     Note in particular that this config file is used by MinimalSchemaTest so
+     anything added to this file needs to work correctly even if there
+     is no uniqueKey or defaultSearchField.
+  -->
+
+<config>
+
+  <dataDir>${solr.data.dir:}</dataDir>
+
+  <directoryFactory name="DirectoryFactory"
+                    class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}"/>
+
+  <luceneMatchVersion>${tests.luceneMatchVersion:LATEST}</luceneMatchVersion>
+
+  <updateHandler class="solr.DirectUpdateHandler2">
+    <commitWithin>
+      <softCommit>${solr.commitwithin.softcommit:true}</softCommit>
+    </commitWithin>
+
+  </updateHandler>
+  <requestHandler name="/select" class="solr.SearchHandler">
+    <lst name="defaults">
+      <str name="echoParams">explicit</str>
+      <str name="indent">true</str>
+      <str name="df">text</str>
+    </lst>
+
+  </requestHandler>
+
+  <updateRequestProcessorChain name="force-script-engine" default="true">
+    <processor class="solr.StatelessScriptUpdateProcessorFactory">
+      <str name="engine">javascript</str>
+      <str name="script">missleading.extension.updateprocessor.js.txt</str>
+    </processor>
+    <processor class="solr.RunUpdateProcessorFactory" />
+  </updateRequestProcessorChain>
+
+  <requestHandler name="/update" class="solr.UpdateRequestHandler"  />
+</config>
+

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6b0217b7/solr/core/src/test/org/apache/solr/cloud/TestConfigSetsAPI.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestConfigSetsAPI.java b/solr/core/src/test/org/apache/solr/cloud/TestConfigSetsAPI.java
index 2027a6d..6c20ccc 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestConfigSetsAPI.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestConfigSetsAPI.java
@@ -16,50 +16,96 @@
  */
 package org.apache.solr.cloud;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.solr.cloud.OverseerConfigSetMessageHandler.BASE_CONFIGSET;
+import static org.apache.solr.common.params.CommonParams.NAME;
+import static org.apache.solr.core.ConfigSetProperties.DEFAULT_FILENAME;
+
 import java.io.ByteArrayInputStream;
 import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
 import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.io.StringReader;
+import java.lang.invoke.MethodHandles;
+import java.net.URI;
+import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
+import java.util.Arrays;
 import java.util.Collection;
+import java.util.Deque;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.LinkedList;
 import java.util.Map;
 import java.util.Properties;
 import java.util.Set;
+import java.util.zip.ZipEntry;
+import java.util.zip.ZipOutputStream;
 
-import com.google.common.collect.ImmutableMap;
 import org.apache.commons.io.FileUtils;
+import org.apache.http.HttpEntity;
+import org.apache.http.client.HttpClient;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.entity.ByteArrayEntity;
+import org.apache.http.message.BasicHeader;
+import org.apache.http.util.EntityUtils;
+import org.apache.lucene.util.TestUtil;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.SolrQuery;
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.HttpClientUtil;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.ConfigSetAdminRequest;
 import org.apache.solr.client.solrj.request.ConfigSetAdminRequest.Create;
 import org.apache.solr.client.solrj.request.ConfigSetAdminRequest.Delete;
-import org.apache.solr.client.solrj.request.ConfigSetAdminRequest.List;
+import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.client.solrj.response.CollectionAdminResponse;
 import org.apache.solr.client.solrj.response.ConfigSetAdminResponse;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.SolrException.ErrorCode;
+import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkConfigManager;
+import org.apache.solr.common.cloud.ZkStateReader;
+import org.apache.solr.common.params.CollectionParams.CollectionAction;
+import org.apache.solr.common.params.CommonParams;
 import org.apache.solr.common.params.ConfigSetParams;
 import org.apache.solr.common.params.ConfigSetParams.ConfigSetAction;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.params.SolrParams;
+import org.apache.solr.common.util.Base64;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.Utils;
 import org.apache.solr.core.ConfigSetProperties;
+import org.apache.solr.core.TestDynamicLoading;
+import org.apache.solr.security.BasicAuthIntegrationTest;
+import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.noggit.JSONParser;
+import org.noggit.ObjectBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import static org.apache.solr.cloud.OverseerConfigSetMessageHandler.BASE_CONFIGSET;
-import static org.apache.solr.common.params.CommonParams.NAME;
-import static org.apache.solr.core.ConfigSetProperties.DEFAULT_FILENAME;
+import com.google.common.collect.ImmutableMap;
 
 /**
  * Simple ConfigSets API tests on user errors and simple success cases.
  */
 public class TestConfigSetsAPI extends SolrTestCaseJ4 {
 
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
   private MiniSolrCloudCluster solrCluster;
 
   @Override
@@ -232,6 +278,349 @@ public class TestConfigSetsAPI extends SolrTestCaseJ4 {
   }
 
   @Test
+  public void testUploadErrors() throws Exception {
+    final SolrClient solrClient = new HttpSolrClient(
+        solrCluster.getJettySolrRunners().get(0).getBaseUrl().toString());
+
+    ByteBuffer emptyData = ByteBuffer.allocate(0);
+
+    // Checking error when no configuration name is specified in request
+    Map map = postDataAndGetResponse(solrCluster.getSolrClient(),
+        solrCluster.getJettySolrRunners().get(0).getBaseUrl().toString()
+        + "/admin/configs?action=UPLOAD&wt=json", emptyData, null, null);
+    assertNotNull(map);
+    long statusCode = (long) getObjectByPath(map, false,
+        Arrays.asList("responseHeader", "status"));
+    assertEquals(400l, statusCode);
+
+    SolrZkClient zkClient = new SolrZkClient(solrCluster.getZkServer().getZkAddress(),
+        AbstractZkTestCase.TIMEOUT, 45000, null);
+
+    // Create dummy config files in zookeeper
+    zkClient.makePath("/configs/myconf", true);
+    zkClient.create("/configs/myconf/firstDummyFile",
+        "first dummy content".getBytes(StandardCharsets.UTF_8), CreateMode.PERSISTENT, true);
+    zkClient.create("/configs/myconf/anotherDummyFile",
+        "second dummy content".getBytes(StandardCharsets.UTF_8), CreateMode.PERSISTENT, true);
+
+    // Checking error when configuration name specified already exists
+    map = postDataAndGetResponse(solrCluster.getSolrClient(),
+        solrCluster.getJettySolrRunners().get(0).getBaseUrl().toString()
+        + "/admin/configs?action=UPLOAD&wt=json&name=myconf", emptyData, null, null);
+    assertNotNull(map);
+    statusCode = (long) getObjectByPath(map, false,
+        Arrays.asList("responseHeader", "status"));
+    assertEquals(400l, statusCode);
+    assertTrue("Expected file doesnt exist in zk. It's possibly overwritten",
+        zkClient.exists("/configs/myconf/firstDummyFile", true));
+    assertTrue("Expected file doesnt exist in zk. It's possibly overwritten",
+        zkClient.exists("/configs/myconf/anotherDummyFile", true));
+
+    zkClient.close();
+    solrClient.close();
+  }
+
+  @Test
+  public void testUpload() throws Exception {
+    String suffix = "-untrusted";
+    uploadConfigSet("regular", suffix, null, null);
+    // try to create a collection with the uploaded configset
+    createCollection("newcollection", "regular" + suffix, 1, 1, solrCluster.getSolrClient());
+    xsltRequest("newcollection");
+  }
+  
+  @Test
+  public void testUploadWithRunExecutableListener() throws Exception {
+    String suffix = "-untrusted";
+    uploadConfigSet("with-run-executable-listener", suffix, null, null);
+    // try to create a collection with the uploaded configset
+    CollectionAdminResponse resp = createCollection("newcollection3", "with-run-executable-listener" + suffix, 1, 1, solrCluster.getSolrClient());
+    log.info("Client saw errors: "+resp.getErrorMessages());
+    assertTrue(resp.getErrorMessages() != null && resp.getErrorMessages().size() > 0);
+    assertTrue(resp.getErrorMessages().getVal(0).
+        contains("The configset for this collection was uploaded without any authentication"));
+  }
+
+  @Test
+  public void testUploadWithScriptUpdateProcessor() throws Exception {
+    for (boolean withAuthorization: Arrays.asList(false, true)) {
+      String suffix;
+      if (withAuthorization) {
+        suffix = "-trusted";
+        protectConfigsHandler();
+        uploadConfigSet("with-script-processor", suffix, "solr", "SolrRocks");
+      } else {
+        suffix = "-untrusted";
+        uploadConfigSet("with-script-processor", suffix, null, null);
+      }
+      // try to create a collection with the uploaded configset
+      CollectionAdminResponse resp = createCollection("newcollection2", "with-script-processor"+suffix,
+          1, 1, solrCluster.getSolrClient());
+      
+      if (withAuthorization) {
+        scriptRequest("newcollection2");
+      } else {
+        log.info("Client saw errors: "+resp.getErrorMessages());
+        assertTrue(resp.getErrorMessages() != null && resp.getErrorMessages().size() > 0);
+        assertTrue(resp.getErrorMessages().getVal(0).
+            contains("The configset for this collection was uploaded without any authentication"));
+      }
+    }
+  }
+
+  protected SolrZkClient zkClient() {
+    ZkStateReader reader = solrCluster.getSolrClient().getZkStateReader();
+    if (reader == null)
+      solrCluster.getSolrClient().connect();
+    return solrCluster.getSolrClient().getZkStateReader().getZkClient();
+  }
+
+  private void protectConfigsHandler() throws Exception {
+    String authcPrefix = "/admin/authentication";
+    String authzPrefix = "/admin/authorization";
+
+    String securityJson = "{\n" +
+        "  'authentication':{\n" +
+        "    'class':'solr.BasicAuthPlugin',\n" +
+        "    'credentials':{'solr':'orwp2Ghgj39lmnrZOTm7Qtre1VqHFDfwAEzr0ApbN3Y= Ju5osoAqOX8iafhWpPP01E5P+sg8tK8tHON7rCYZRRw='}},\n" +
+        "  'authorization':{\n" +
+        "    'class':'solr.RuleBasedAuthorizationPlugin',\n" +
+        "    'user-role':{'solr':'admin'},\n" +
+        "    'permissions':[{'name':'security-edit','role':'admin'}, {'name':'config-edit','role':'admin'}]}}";
+
+    HttpClient cl = null;
+    try {
+      cl = HttpClientUtil.createClient(null);
+      JettySolrRunner randomJetty = solrCluster.getRandomJetty(random());
+      String baseUrl = randomJetty.getBaseUrl().toString();
+
+      zkClient().setData("/security.json", securityJson.replaceAll("'", "\"").getBytes(UTF_8), true);
+      BasicAuthIntegrationTest.verifySecurityStatus(cl, baseUrl + authcPrefix, "authentication/class", "solr.BasicAuthPlugin", 50);
+      BasicAuthIntegrationTest.verifySecurityStatus(cl, baseUrl + authzPrefix, "authorization/class", "solr.RuleBasedAuthorizationPlugin", 50);
+    } finally {
+      if (cl != null) {
+        HttpClientUtil.close(cl);
+      }
+    }
+    Thread.sleep(5000); // TODO: Without a delay, the test fails. Some problem with Authc/Authz framework?
+  }
+
+  private void uploadConfigSet(String configSetName, String suffix, String username, String password) throws Exception {
+    // Read zipped sample config
+    ByteBuffer sampleZippedConfig = TestDynamicLoading
+        .getFileContent(
+            createTempZipFile("solr/configsets/upload/"+configSetName), false);
+
+    SolrZkClient zkClient = new SolrZkClient(solrCluster.getZkServer().getZkAddress(),
+        AbstractZkTestCase.TIMEOUT, 45000, null);
+    try {
+      ZkConfigManager configManager = new ZkConfigManager(zkClient);
+      assertFalse(configManager.configExists(configSetName+suffix));
+
+      Map map = postDataAndGetResponse(solrCluster.getSolrClient(),
+          solrCluster.getJettySolrRunners().get(0).getBaseUrl().toString() + "/admin/configs?action=UPLOAD&wt=json&name="+configSetName+suffix,
+          sampleZippedConfig, username, password);
+      assertNotNull(map);
+      long statusCode = (long) getObjectByPath(map, false, Arrays.asList("responseHeader", "status"));
+      assertEquals(0l, statusCode);
+
+      assertTrue("managed-schema file should have been uploaded",
+          zkClient.exists("/configs/"+configSetName+suffix+"/managed-schema", true));
+      assertTrue("managed-schema file contents on zookeeper are not exactly same as that of the file uploaded in config",
+          Arrays.equals(zkClient.getData("/configs/"+configSetName+suffix+"/managed-schema", null, null, true),
+              readFile("solr/configsets/upload/"+configSetName+"/managed-schema")));
+
+      assertTrue("solrconfig.xml file should have been uploaded",
+          zkClient.exists("/configs/"+configSetName+suffix+"/solrconfig.xml", true));
+      byte data[] = zkClient.getData("/configs/"+configSetName+suffix, null, null, true);
+      //assertEquals("{\"trusted\": false}", new String(data, StandardCharsets.UTF_8));
+      assertTrue("solrconfig.xml file contents on zookeeper are not exactly same as that of the file uploaded in config",
+          Arrays.equals(zkClient.getData("/configs/"+configSetName+suffix+"/solrconfig.xml", null, null, true),
+              readFile("solr/configsets/upload/"+configSetName+"/solrconfig.xml")));
+    } finally {
+      zkClient.close();
+    }
+  }
+  
+  /**
+   * Create a zip file (in the temp directory) containing all the files within the specified directory
+   * and return the path for the zip file.
+   */
+  private String createTempZipFile(String directoryPath) {
+    File zipFile = new File(solrCluster.getBaseDir().toFile().getAbsolutePath() +
+        File.separator + TestUtil.randomSimpleString(random(), 6, 8) + ".zip");
+
+    File directory = TestDynamicLoading.getFile(directoryPath);
+    log.info("Directory: "+directory.getAbsolutePath());
+    try {
+      zip (directory, zipFile);
+      log.info("Zipfile: "+zipFile.getAbsolutePath());
+      return zipFile.getAbsolutePath();
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  private static void zip(File directory, File zipfile) throws IOException {
+    URI base = directory.toURI();
+    Deque<File> queue = new LinkedList<File>();
+    queue.push(directory);
+    OutputStream out = new FileOutputStream(zipfile);
+    ZipOutputStream zout = new ZipOutputStream(out);
+    try {
+      while (!queue.isEmpty()) {
+        directory = queue.pop();
+        for (File kid : directory.listFiles()) {
+          String name = base.relativize(kid.toURI()).getPath();
+          if (kid.isDirectory()) {
+            queue.push(kid);
+            name = name.endsWith("/") ? name : name + "/";
+            zout.putNextEntry(new ZipEntry(name));
+          } else {
+            zout.putNextEntry(new ZipEntry(name));
+
+            InputStream in = new FileInputStream(kid);
+            try {
+              byte[] buffer = new byte[1024];
+              while (true) {
+                int readCount = in.read(buffer);
+                if (readCount < 0) {
+                  break;
+                }
+                zout.write(buffer, 0, readCount);
+              }
+            } finally {
+              in.close();
+            }
+
+            zout.closeEntry();
+          }
+        }
+      }
+    } finally {
+      zout.close();
+    }
+  }
+
+  private void xsltRequest(String collection) throws SolrServerException, IOException {
+    String baseUrl = solrCluster.getJettySolrRunners().get(0).getBaseUrl().toString();
+    try (HttpSolrClient client = new HttpSolrClient(baseUrl + "/" + collection)) {
+      String xml = 
+          "<random>" +
+              " <document>" +
+              "  <node name=\"id\" value=\"12345\"/>" +
+              "  <node name=\"name\" value=\"kitten\"/>" +
+              "  <node name=\"text\" enhance=\"3\" value=\"some other day\"/>" +
+              "  <node name=\"title\" enhance=\"4\" value=\"A story\"/>" +
+              "  <node name=\"timestamp\" enhance=\"5\" value=\"2011-07-01T10:31:57.140Z\"/>" +
+              " </document>" +
+              "</random>";
+
+      SolrQuery query = new SolrQuery();
+      query.setQuery( "*:*" );//for anything
+      query.add("qt","/update");
+      query.add(CommonParams.TR, "xsl-update-handler-test.xsl");
+      query.add("stream.body", xml);
+      query.add("commit", "true");
+      try {
+        client.query(query);
+        fail("This should've returned a 401.");
+      } catch (SolrException ex) {
+        assertEquals(ErrorCode.UNAUTHORIZED.code, ex.code());
+      }
+    }
+  }
+  
+  public void scriptRequest(String collection) throws SolrServerException, IOException {
+    SolrClient client = solrCluster.getSolrClient();
+    SolrInputDocument doc = sdoc("id", "4055", "subject", "Solr");
+    client.add(collection, doc);
+    client.commit(collection);
+
+    assertEquals("42", client.query(collection, params("q", "*:*")).getResults().get(0).get("script_added_i"));
+  }
+
+  protected CollectionAdminResponse createCollection(String collectionName, String confSetName, int numShards,
+      int replicationFactor, SolrClient client)  throws SolrServerException, IOException {
+    ModifiableSolrParams params = new ModifiableSolrParams();
+    params.set("action", CollectionAction.CREATE.toString());
+    params.set("collection.configName", confSetName);
+    params.set("name", collectionName);
+    params.set("numShards", numShards);
+    params.set("replicationFactor", replicationFactor);
+    SolrRequest request = new QueryRequest(params);
+    request.setPath("/admin/collections");
+
+    CollectionAdminResponse res = new CollectionAdminResponse();
+    res.setResponse(client.request(request));
+    return res;
+  }
+  
+  public static Map postDataAndGetResponse(CloudSolrClient cloudClient,
+      String uri, ByteBuffer bytarr, String username, String password) throws IOException {
+    HttpPost httpPost = null;
+    HttpEntity entity;
+    String response = null;
+    Map m = null;
+    
+    try {
+      httpPost = new HttpPost(uri);
+      
+      if (username != null) {
+        String userPass = username + ":" + password;
+        String encoded = Base64.byteArrayToBase64(userPass.getBytes(UTF_8));
+        BasicHeader header = new BasicHeader("Authorization", "Basic " + encoded);
+        httpPost.setHeader(header);
+      }
+
+      httpPost.setHeader("Content-Type", "application/octet-stream");
+      httpPost.setEntity(new ByteArrayEntity(bytarr.array(), bytarr
+          .arrayOffset(), bytarr.limit()));
+      entity = cloudClient.getLbClient().getHttpClient().execute(httpPost)
+          .getEntity();
+      try {
+        response = EntityUtils.toString(entity, StandardCharsets.UTF_8);
+        m = (Map) ObjectBuilder.getVal(new JSONParser(
+            new StringReader(response)));
+      } catch (JSONParser.ParseException e) {
+        fail(e.getMessage());
+      }
+    } finally {
+      httpPost.releaseConnection();
+    }
+    return m;
+  }
+
+  private static Object getObjectByPath(Map root, boolean onlyPrimitive, java.util.List<String> hierarchy) {
+    Map obj = root;
+    for (int i = 0; i < hierarchy.size(); i++) {
+      String s = hierarchy.get(i);
+      if (i < hierarchy.size() - 1) {
+        if (!(obj.get(s) instanceof Map)) return null;
+        obj = (Map) obj.get(s);
+        if (obj == null) return null;
+      } else {
+        Object val = obj.get(s);
+        if (onlyPrimitive && val instanceof Map) {
+          return null;
+        }
+        return val;
+      }
+    }
+
+    return false;
+  }
+
+  private byte[] readFile(String fname) throws IOException {
+    byte[] buf = null;
+    try (FileInputStream fis = new FileInputStream(getFile(fname))) {
+      buf = new byte[fis.available()];
+      fis.read(buf);
+    }
+    return buf;
+  }
+  
+  @Test
   public void testDeleteErrors() throws Exception {
     final String baseUrl = solrCluster.getJettySolrRunners().get(0).getBaseUrl().toString();
     final SolrClient solrClient = getHttpSolrClient(baseUrl);
@@ -304,7 +693,7 @@ public class TestConfigSetsAPI extends SolrTestCaseJ4 {
         AbstractZkTestCase.TIMEOUT, AbstractZkTestCase.TIMEOUT, null);
     try {
       // test empty
-      List list = new List();
+      ConfigSetAdminRequest.List list = new ConfigSetAdminRequest.List();
       ConfigSetAdminResponse.List response = list.process(solrClient);
       Collection<String> actualConfigSets = response.getConfigSets();
       assertEquals(0, actualConfigSets.size());

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6b0217b7/solr/core/src/test/org/apache/solr/core/TestCodecSupport.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/core/TestCodecSupport.java b/solr/core/src/test/org/apache/solr/core/TestCodecSupport.java
index b6097ab..7d2f174 100644
--- a/solr/core/src/test/org/apache/solr/core/TestCodecSupport.java
+++ b/solr/core/src/test/org/apache/solr/core/TestCodecSupport.java
@@ -218,7 +218,7 @@ public class TestCodecSupport extends SolrTestCaseJ4 {
     
     try {
       c = new SolrCore(new CoreDescriptor(h.getCoreContainer(), newCoreName, testSolrHome.resolve(newCoreName)), 
-          new ConfigSet("fakeConfigset", config, schema, null));
+          new ConfigSet("fakeConfigset", config, schema, null, true));
       assertNull(h.getCoreContainer().registerCore(newCoreName, c, false, false));
       h.coreName = newCoreName;
       assertEquals("We are not using the correct core", "solrconfig_codec2.xml", h.getCore().getConfigResource());

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6b0217b7/solr/core/src/test/org/apache/solr/core/TestDynamicLoading.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/core/TestDynamicLoading.java b/solr/core/src/test/org/apache/solr/core/TestDynamicLoading.java
index 306b4b2..9bbe09f 100644
--- a/solr/core/src/test/org/apache/solr/core/TestDynamicLoading.java
+++ b/solr/core/src/test/org/apache/solr/core/TestDynamicLoading.java
@@ -25,6 +25,7 @@ import org.apache.solr.util.SimplePostTool;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
@@ -257,8 +258,16 @@ public class TestDynamicLoading extends AbstractFullDistribZkTestBase {
   }
 
   public static ByteBuffer getFileContent(String f) throws IOException {
+    return getFileContent(f, true);
+  }
+  /**
+   * @param loadFromClassPath if true, it will look in the classpath to find the file,
+   *        otherwise load from absolute filesystem path.
+   */
+  public static ByteBuffer getFileContent(String f, boolean loadFromClassPath) throws IOException {
     ByteBuffer jar;
-    try (FileInputStream fis = new FileInputStream(getFile(f))) {
+    File file = loadFromClassPath ? getFile(f): new File(f);
+    try (FileInputStream fis = new FileInputStream(file)) {
       byte[] buf = new byte[fis.available()];
       fis.read(buf);
       jar = ByteBuffer.wrap(buf);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6b0217b7/solr/solrj/src/java/org/apache/solr/common/params/ConfigSetParams.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/common/params/ConfigSetParams.java b/solr/solrj/src/java/org/apache/solr/common/params/ConfigSetParams.java
index 49c39ec..fde7e57 100644
--- a/solr/solrj/src/java/org/apache/solr/common/params/ConfigSetParams.java
+++ b/solr/solrj/src/java/org/apache/solr/common/params/ConfigSetParams.java
@@ -27,6 +27,7 @@ public interface ConfigSetParams
 
   public enum ConfigSetAction {
     CREATE,
+    UPLOAD,
     DELETE,
     LIST;
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6b0217b7/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
----------------------------------------------------------------------
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java b/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
index e8a0c08..15895d3 100644
--- a/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
+++ b/solr/test-framework/src/java/org/apache/solr/cloud/MiniSolrCloudCluster.java
@@ -470,7 +470,11 @@ public class MiniSolrCloudCluster {
       }
     }
   }
-  
+
+  public Path getBaseDir() {
+    return baseDir;
+  }
+
   public CloudSolrClient getSolrClient() {
     return solrClient;
   }


[22/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-10362 Be more specific when catching this exception.

Posted by ab...@apache.org.
SOLR-10362 Be more specific when catching this exception.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/30f7914c
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/30f7914c
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/30f7914c

Branch: refs/heads/jira/solr-9959
Commit: 30f7914c3b8ed990fcc0812f10de21722e96469f
Parents: 1ace174
Author: Andrzej Bialecki <ab...@apache.org>
Authored: Wed Mar 29 14:42:20 2017 +0200
Committer: Andrzej Bialecki <ab...@apache.org>
Committed: Wed Mar 29 15:26:33 2017 +0200

----------------------------------------------------------------------
 .../src/java/org/apache/solr/util/stats/MetricUtils.java     | 8 ++++++--
 .../src/test/org/apache/solr/util/stats/MetricUtilsTest.java | 8 ++++----
 2 files changed, 10 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/30f7914c/solr/core/src/java/org/apache/solr/util/stats/MetricUtils.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/util/stats/MetricUtils.java b/solr/core/src/java/org/apache/solr/util/stats/MetricUtils.java
index 9809070..491932d 100644
--- a/solr/core/src/java/org/apache/solr/util/stats/MetricUtils.java
+++ b/solr/core/src/java/org/apache/solr/util/stats/MetricUtils.java
@@ -215,8 +215,12 @@ public class MetricUtils {
             try {
               consumer.accept(n, convertGauge(gauge, compact));
             } catch (InternalError ie) {
-              LOG.warn("Error converting gauge '" + n + "', possible JDK bug: SOLR-10362", ie);
-              consumer.accept(n, null);
+              if (n.startsWith("memory.") && ie.getMessage().contains("Memory Pool not found")) {
+                LOG.warn("Error converting gauge '" + n + "', possible JDK bug: SOLR-10362", ie);
+                consumer.accept(n, null);
+              } else {
+                throw ie;
+              }
             }
           } else if (metric instanceof Meter) {
             Meter meter = (Meter) metric;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/30f7914c/solr/core/src/test/org/apache/solr/util/stats/MetricUtilsTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/util/stats/MetricUtilsTest.java b/solr/core/src/test/org/apache/solr/util/stats/MetricUtilsTest.java
index fe98157..aa02de5 100644
--- a/solr/core/src/test/org/apache/solr/util/stats/MetricUtilsTest.java
+++ b/solr/core/src/test/org/apache/solr/util/stats/MetricUtilsTest.java
@@ -81,8 +81,8 @@ public class MetricUtilsTest extends SolrTestCaseJ4 {
     am.set("bar", 2);
     Gauge<String> gauge = () -> "foobar";
     registry.register("gauge", gauge);
-    Gauge<Long> error = () -> {throw new InternalError("expected error");};
-    registry.register("expected.error", error);
+    Gauge<Long> error = () -> {throw new InternalError("Memory Pool not found error");};
+    registry.register("memory.expected.error", error);
     MetricUtils.toMaps(registry, Collections.singletonList(MetricFilter.ALL), MetricFilter.ALL,
         false, false, false, (k, o) -> {
       Map v = (Map)o;
@@ -108,7 +108,7 @@ public class MetricUtilsTest extends SolrTestCaseJ4 {
         update = (Map<String, Object>)values.get("bar");
         assertEquals(2, update.get("value"));
         assertEquals(2, update.get("updateCount"));
-      } else if (k.startsWith("expected.error")) {
+      } else if (k.startsWith("memory.expected.error")) {
         assertNull(v);
       }
     });
@@ -147,7 +147,7 @@ public class MetricUtilsTest extends SolrTestCaseJ4 {
             update = (Map<String, Object>)values.get("bar");
             assertEquals(2, update.get("value"));
             assertEquals(2, update.get("updateCount"));
-          } else if (k.startsWith("expected.error")) {
+          } else if (k.startsWith("memory.expected.error")) {
             assertNull(o);
           } else {
             Map v = (Map)o;


[17/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-10079: TestInPlaceUpdates(Distrib|Standalone) failures

Posted by ab...@apache.org.
SOLR-10079: TestInPlaceUpdates(Distrib|Standalone) failures


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/144091ad
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/144091ad
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/144091ad

Branch: refs/heads/jira/solr-9959
Commit: 144091ad2957d59f83d59c7fcb1afeda65b0f914
Parents: 66bfdcb
Author: Cao Manh Dat <da...@apache.org>
Authored: Wed Mar 29 08:09:40 2017 +0700
Committer: Cao Manh Dat <da...@apache.org>
Committed: Wed Mar 29 08:09:40 2017 +0700

----------------------------------------------------------------------
 .../org/apache/solr/util/TestInjection.java     |  2 +-
 .../solr/update/TestInPlaceUpdatesDistrib.java  | 22 ++++++++------------
 2 files changed, 10 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/144091ad/solr/core/src/java/org/apache/solr/util/TestInjection.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/util/TestInjection.java b/solr/core/src/java/org/apache/solr/util/TestInjection.java
index 1299273..6b7b1f8 100644
--- a/solr/core/src/java/org/apache/solr/util/TestInjection.java
+++ b/solr/core/src/java/org/apache/solr/util/TestInjection.java
@@ -369,7 +369,7 @@ public class TestInjection {
     Pair<Boolean,Integer> pair = parseValue(waitForReplicasInSync);
     boolean enabled = pair.first();
     if (!enabled) return true;
-    long t = System.currentTimeMillis() - 100;
+    long t = System.currentTimeMillis() - 200;
     try {
       for (int i = 0; i < pair.second(); i++) {
         if (core.isClosed()) return true;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/144091ad/solr/core/src/test/org/apache/solr/update/TestInPlaceUpdatesDistrib.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/update/TestInPlaceUpdatesDistrib.java b/solr/core/src/test/org/apache/solr/update/TestInPlaceUpdatesDistrib.java
index cbd7b02..f9f377c 100644
--- a/solr/core/src/test/org/apache/solr/update/TestInPlaceUpdatesDistrib.java
+++ b/solr/core/src/test/org/apache/solr/update/TestInPlaceUpdatesDistrib.java
@@ -201,7 +201,7 @@ public class TestInPlaceUpdatesDistrib extends AbstractFullDistribZkTestBase {
     assertEquals(2, NONLEADERS.size());
   }
 
-  final int NUM_RETRIES = 100, WAIT_TIME = 10;
+  final int NUM_RETRIES = 100, WAIT_TIME = 50;
 
   // The following should work: full update to doc 0, in-place update for doc 0, delete doc 0
   private void reorderedDBQsSimpleTest() throws Exception {
@@ -266,8 +266,6 @@ public class TestInPlaceUpdatesDistrib extends AbstractFullDistribZkTestBase {
     }
 
     log.info("reorderedDBQsSimpleTest: This test passed fine...");
-    clearIndex();
-    commit();
   }
 
   private void reorderedDBQIndividualReplicaTest() throws Exception {
@@ -324,7 +322,8 @@ public class TestInPlaceUpdatesDistrib extends AbstractFullDistribZkTestBase {
 
   private void docValuesUpdateTest() throws Exception {
     // number of docs we're testing (0 <= id), index may contain additional random docs (id < 0)
-    final int numDocs = atLeast(100);
+    int numDocs = atLeast(100);
+    if (onlyLeaderIndexes) numDocs = TestUtil.nextInt(random(), 10, 50);
     log.info("Trying num docs = " + numDocs);
     final List<Integer> ids = new ArrayList<Integer>(numDocs);
     for (int id = 0; id < numDocs; id++) {
@@ -667,8 +666,6 @@ public class TestInPlaceUpdatesDistrib extends AbstractFullDistribZkTestBase {
     }
 
     log.info("outOfOrderUpdatesIndividualReplicaTest: This test passed fine...");
-    clearIndex();
-    commit();
   }
   
   // The following should work: full update to doc 0, in-place update for doc 0, delete doc 0
@@ -733,8 +730,6 @@ public class TestInPlaceUpdatesDistrib extends AbstractFullDistribZkTestBase {
     }
 
     log.info("reorderedDeletesTest: This test passed fine...");
-    clearIndex();
-    commit();
   }
 
   /* Test for a situation when a document requiring in-place update cannot be "resurrected"
@@ -972,10 +967,11 @@ public class TestInPlaceUpdatesDistrib extends AbstractFullDistribZkTestBase {
     assertEquals("Field: " + fieldName, expected, rsp.getField());
   }
 
-  private static class AsyncUpdateWithRandomCommit implements Callable<UpdateResponse> {
+  private class AsyncUpdateWithRandomCommit implements Callable<UpdateResponse> {
     UpdateRequest update;
     SolrClient solrClient;
     final Random rnd;
+    int commitBound = onlyLeaderIndexes ? 50 : 3;
 
     public AsyncUpdateWithRandomCommit (UpdateRequest update, SolrClient solrClient, long seed) {
       this.update = update;
@@ -986,7 +982,7 @@ public class TestInPlaceUpdatesDistrib extends AbstractFullDistribZkTestBase {
     @Override
     public UpdateResponse call() throws Exception {
       UpdateResponse resp = update.process(solrClient); //solrClient.request(update);
-      if (rnd.nextInt(3) == 0)
+      if (rnd.nextInt(commitBound) == 0)
         solrClient.commit();
       return resp;
     }
@@ -1113,9 +1109,9 @@ public class TestInPlaceUpdatesDistrib extends AbstractFullDistribZkTestBase {
    * @return the versions of each of the specials document returned when indexing it
    */
   protected List<Long> buildRandomIndex(Float initFloat, List<Integer> specialIds) throws Exception {
-    
+
     int id = -1; // used for non special docs
-    final int numPreDocs = rarely() ? TestUtil.nextInt(random(),0,9) : atLeast(10);
+    final int numPreDocs = rarely() || onlyLeaderIndexes ? TestUtil.nextInt(random(),0,9) : atLeast(10);
     for (int i = 1; i <= numPreDocs; i++) {
       addDocAndGetVersion("id", id, "title_s", "title" + id, "id_i", id);
       id--;
@@ -1128,7 +1124,7 @@ public class TestInPlaceUpdatesDistrib extends AbstractFullDistribZkTestBase {
         versions.add(addDocAndGetVersion("id", special, "title_s", "title" + special, "id_i", special,
                                          "inplace_updatable_float", initFloat));
       }
-      final int numPostDocs = rarely() ? TestUtil.nextInt(random(),0,9) : atLeast(10);
+      final int numPostDocs = rarely() || onlyLeaderIndexes ? TestUtil.nextInt(random(),0,2) : atLeast(10);
       for (int i = 1; i <= numPostDocs; i++) {
         addDocAndGetVersion("id", id, "title_s", "title" + id, "id_i", id);
         id--;


[45/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-10356: Adds basic math streaming evaluators

Posted by ab...@apache.org.
SOLR-10356: Adds basic math streaming evaluators


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/674ce4e8
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/674ce4e8
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/674ce4e8

Branch: refs/heads/jira/solr-9959
Commit: 674ce4e89393efe3147629e76f053c9901c182dc
Parents: b02626d
Author: Dennis Gove <dp...@gmail.com>
Authored: Thu Mar 23 20:08:11 2017 -0400
Committer: Dennis Gove <dp...@gmail.com>
Committed: Sat Apr 1 21:58:04 2017 -0400

----------------------------------------------------------------------
 solr/CHANGES.txt                                |   2 +
 .../org/apache/solr/handler/StreamHandler.java  |  79 ++++++++-
 .../solrj/io/eval/ArcCosineEvaluator.java       |  60 +++++++
 .../client/solrj/io/eval/ArcSineEvaluator.java  |  60 +++++++
 .../solrj/io/eval/ArcTangentEvaluator.java      |  60 +++++++
 .../client/solrj/io/eval/BooleanEvaluator.java  |   7 -
 .../client/solrj/io/eval/CeilingEvaluator.java  |  61 +++++++
 .../client/solrj/io/eval/CoalesceEvaluator.java |  52 ++++++
 .../client/solrj/io/eval/ComplexEvaluator.java  |   6 +
 .../solrj/io/eval/ConditionalEvaluator.java     |   6 -
 .../client/solrj/io/eval/CosineEvaluator.java   |  60 +++++++
 .../solrj/io/eval/CubedRootEvaluator.java       |  60 +++++++
 .../client/solrj/io/eval/FloorEvaluator.java    |  61 +++++++
 .../io/eval/HyperbolicCosineEvaluator.java      |  60 +++++++
 .../solrj/io/eval/HyperbolicSineEvaluator.java  |  60 +++++++
 .../io/eval/HyperbolicTangentEvaluator.java     |  60 +++++++
 .../client/solrj/io/eval/ModuloEvaluator.java   |  78 +++++++++
 .../client/solrj/io/eval/NumberEvaluator.java   |   6 -
 .../client/solrj/io/eval/PowerEvaluator.java    |  61 +++++++
 .../client/solrj/io/eval/RoundEvaluator.java    |  60 +++++++
 .../client/solrj/io/eval/SineEvaluator.java     |  60 +++++++
 .../solrj/io/eval/SquareRootEvaluator.java      |  60 +++++++
 .../client/solrj/io/eval/TangentEvaluator.java  |  60 +++++++
 .../io/stream/eval/ArcCosineEvaluatorTest.java  |  91 ++++++++++
 .../io/stream/eval/ArcSineEvaluatorTest.java    |  91 ++++++++++
 .../io/stream/eval/ArcTangentEvaluatorTest.java |  91 ++++++++++
 .../io/stream/eval/CeilingEvaluatorTest.java    |  96 +++++++++++
 .../io/stream/eval/CoalesceEvaluatorTest.java   | 112 +++++++++++++
 .../io/stream/eval/CosineEvaluatorTest.java     |  91 ++++++++++
 .../io/stream/eval/CubedRootEvaluatorTest.java  |  91 ++++++++++
 .../io/stream/eval/FloorEvaluatorTest.java      |  96 +++++++++++
 .../eval/HyperbolicCosineEvaluatorTest.java     |  91 ++++++++++
 .../eval/HyperbolicSineEvaluatorTest.java       |  91 ++++++++++
 .../eval/HyperbolicTangentEvaluatorTest.java    |  91 ++++++++++
 .../io/stream/eval/ModuloEvaluatorTest.java     | 164 +++++++++++++++++++
 .../io/stream/eval/PowerEvaluatorTest.java      | 119 ++++++++++++++
 .../io/stream/eval/RoundEvaluatorTest.java      |  95 +++++++++++
 .../solrj/io/stream/eval/SineEvaluatorTest.java |  91 ++++++++++
 .../io/stream/eval/SquareRootEvaluatorTest.java |  91 ++++++++++
 .../io/stream/eval/TangentEvaluatorTest.java    |  91 ++++++++++
 40 files changed, 2799 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 99edab4..1c3aaf7 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -61,6 +61,8 @@ New Features
 * SOLR-10292: Adds CartesianProductStream which turns a single tuple with a multi-valued field into N 
   tuples, one for each value in the multi-valued field. (Dennis Gove)
 
+* SOLR-10356: Adds basic math Streaming Evaluators (Dennis Gove)
+
 Bug Fixes
 ----------------------
 * SOLR-9262: Connection and read timeouts are being ignored by UpdateShardHandler after SOLR-4509.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/StreamHandler.java b/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
index 3ede732..b508754 100644
--- a/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
@@ -16,6 +16,9 @@
  */
 package org.apache.solr.handler;
 
+import static org.apache.solr.common.params.CommonParams.ID;
+import static org.apache.solr.common.params.CommonParams.SORT;
+
 import java.io.IOException;
 import java.lang.invoke.MethodHandles;
 import java.util.ArrayList;
@@ -33,27 +36,80 @@ import org.apache.solr.client.solrj.io.comp.StreamComparator;
 import org.apache.solr.client.solrj.io.eval.AbsoluteValueEvaluator;
 import org.apache.solr.client.solrj.io.eval.AddEvaluator;
 import org.apache.solr.client.solrj.io.eval.AndEvaluator;
+import org.apache.solr.client.solrj.io.eval.ArcCosineEvaluator;
+import org.apache.solr.client.solrj.io.eval.ArcSineEvaluator;
+import org.apache.solr.client.solrj.io.eval.ArcTangentEvaluator;
+import org.apache.solr.client.solrj.io.eval.CeilingEvaluator;
+import org.apache.solr.client.solrj.io.eval.CoalesceEvaluator;
+import org.apache.solr.client.solrj.io.eval.CosineEvaluator;
+import org.apache.solr.client.solrj.io.eval.CubedRootEvaluator;
 import org.apache.solr.client.solrj.io.eval.DivideEvaluator;
 import org.apache.solr.client.solrj.io.eval.EqualsEvaluator;
 import org.apache.solr.client.solrj.io.eval.ExclusiveOrEvaluator;
+import org.apache.solr.client.solrj.io.eval.FloorEvaluator;
 import org.apache.solr.client.solrj.io.eval.GreaterThanEqualToEvaluator;
 import org.apache.solr.client.solrj.io.eval.GreaterThanEvaluator;
+import org.apache.solr.client.solrj.io.eval.HyperbolicCosineEvaluator;
+import org.apache.solr.client.solrj.io.eval.HyperbolicSineEvaluator;
+import org.apache.solr.client.solrj.io.eval.HyperbolicTangentEvaluator;
 import org.apache.solr.client.solrj.io.eval.IfThenElseEvaluator;
 import org.apache.solr.client.solrj.io.eval.LessThanEqualToEvaluator;
 import org.apache.solr.client.solrj.io.eval.LessThanEvaluator;
+import org.apache.solr.client.solrj.io.eval.ModuloEvaluator;
 import org.apache.solr.client.solrj.io.eval.MultiplyEvaluator;
 import org.apache.solr.client.solrj.io.eval.NaturalLogEvaluator;
 import org.apache.solr.client.solrj.io.eval.NotEvaluator;
 import org.apache.solr.client.solrj.io.eval.OrEvaluator;
+import org.apache.solr.client.solrj.io.eval.PowerEvaluator;
 import org.apache.solr.client.solrj.io.eval.RawValueEvaluator;
+import org.apache.solr.client.solrj.io.eval.RoundEvaluator;
+import org.apache.solr.client.solrj.io.eval.SineEvaluator;
+import org.apache.solr.client.solrj.io.eval.SquareRootEvaluator;
 import org.apache.solr.client.solrj.io.eval.SubtractEvaluator;
+import org.apache.solr.client.solrj.io.eval.TangentEvaluator;
 import org.apache.solr.client.solrj.io.graph.GatherNodesStream;
 import org.apache.solr.client.solrj.io.graph.ShortestPathStream;
 import org.apache.solr.client.solrj.io.ops.ConcatOperation;
 import org.apache.solr.client.solrj.io.ops.DistinctOperation;
 import org.apache.solr.client.solrj.io.ops.GroupOperation;
 import org.apache.solr.client.solrj.io.ops.ReplaceOperation;
-import org.apache.solr.client.solrj.io.stream.*;
+import org.apache.solr.client.solrj.io.stream.CartesianProductStream;
+import org.apache.solr.client.solrj.io.stream.CloudSolrStream;
+import org.apache.solr.client.solrj.io.stream.CommitStream;
+import org.apache.solr.client.solrj.io.stream.ComplementStream;
+import org.apache.solr.client.solrj.io.stream.DaemonStream;
+import org.apache.solr.client.solrj.io.stream.ExceptionStream;
+import org.apache.solr.client.solrj.io.stream.ExecutorStream;
+import org.apache.solr.client.solrj.io.stream.FacetStream;
+import org.apache.solr.client.solrj.io.stream.FeaturesSelectionStream;
+import org.apache.solr.client.solrj.io.stream.FetchStream;
+import org.apache.solr.client.solrj.io.stream.HashJoinStream;
+import org.apache.solr.client.solrj.io.stream.HavingStream;
+import org.apache.solr.client.solrj.io.stream.InnerJoinStream;
+import org.apache.solr.client.solrj.io.stream.IntersectStream;
+import org.apache.solr.client.solrj.io.stream.JDBCStream;
+import org.apache.solr.client.solrj.io.stream.LeftOuterJoinStream;
+import org.apache.solr.client.solrj.io.stream.MergeStream;
+import org.apache.solr.client.solrj.io.stream.ModelStream;
+import org.apache.solr.client.solrj.io.stream.NullStream;
+import org.apache.solr.client.solrj.io.stream.OuterHashJoinStream;
+import org.apache.solr.client.solrj.io.stream.ParallelStream;
+import org.apache.solr.client.solrj.io.stream.PriorityStream;
+import org.apache.solr.client.solrj.io.stream.RandomStream;
+import org.apache.solr.client.solrj.io.stream.RankStream;
+import org.apache.solr.client.solrj.io.stream.ReducerStream;
+import org.apache.solr.client.solrj.io.stream.RollupStream;
+import org.apache.solr.client.solrj.io.stream.ScoreNodesStream;
+import org.apache.solr.client.solrj.io.stream.SelectStream;
+import org.apache.solr.client.solrj.io.stream.SignificantTermsStream;
+import org.apache.solr.client.solrj.io.stream.SortStream;
+import org.apache.solr.client.solrj.io.stream.StatsStream;
+import org.apache.solr.client.solrj.io.stream.StreamContext;
+import org.apache.solr.client.solrj.io.stream.TextLogitStream;
+import org.apache.solr.client.solrj.io.stream.TopicStream;
+import org.apache.solr.client.solrj.io.stream.TupleStream;
+import org.apache.solr.client.solrj.io.stream.UniqueStream;
+import org.apache.solr.client.solrj.io.stream.UpdateStream;
 import org.apache.solr.client.solrj.io.stream.expr.Explanation;
 import org.apache.solr.client.solrj.io.stream.expr.Explanation.ExpressionType;
 import org.apache.solr.client.solrj.io.stream.expr.Expressible;
@@ -80,9 +136,6 @@ import org.apache.solr.util.plugin.SolrCoreAware;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.solr.common.params.CommonParams.ID;
-import static org.apache.solr.common.params.CommonParams.SORT;
-
 public class StreamHandler extends RequestHandlerBase implements SolrCoreAware, PermissionNameProvider {
 
   static SolrClientCache clientCache = new SolrClientCache();
@@ -207,6 +260,24 @@ public class StreamHandler extends RequestHandlerBase implements SolrCoreAware,
       .withFunctionName("mult", MultiplyEvaluator.class)
       .withFunctionName("sub", SubtractEvaluator.class)
       .withFunctionName("log", NaturalLogEvaluator.class)
+      .withFunctionName("pow", PowerEvaluator.class)
+      .withFunctionName("mod", ModuloEvaluator.class)
+      .withFunctionName("ceil", CeilingEvaluator.class)
+      .withFunctionName("floor", FloorEvaluator.class)
+      .withFunctionName("sin", SineEvaluator.class)
+      .withFunctionName("asin", ArcSineEvaluator.class)
+      .withFunctionName("sinh", HyperbolicSineEvaluator.class)
+      .withFunctionName("cos", CosineEvaluator.class)
+      .withFunctionName("acos", ArcCosineEvaluator.class)
+      .withFunctionName("cosh", HyperbolicCosineEvaluator.class)
+      .withFunctionName("tan", TangentEvaluator.class)
+      .withFunctionName("atan", ArcTangentEvaluator.class)
+      .withFunctionName("tanh", HyperbolicTangentEvaluator.class)
+      .withFunctionName("round", RoundEvaluator.class)
+      .withFunctionName("sqrt", SquareRootEvaluator.class)
+      .withFunctionName("cbrt", CubedRootEvaluator.class)
+      .withFunctionName("coalesce", CoalesceEvaluator.class)
+      
       // Conditional Stream Evaluators
       .withFunctionName("if", IfThenElseEvaluator.class)
       .withFunctionName("analyze", AnalyzeEvaluator.class)

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ArcCosineEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ArcCosineEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ArcCosineEvaluator.java
new file mode 100644
index 0000000..0c8e383
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ArcCosineEvaluator.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * 
+ */
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.util.List;
+import java.util.Locale;
+
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+public class ArcCosineEvaluator extends NumberEvaluator {
+  protected static final long serialVersionUID = 1L;
+  
+  public ArcCosineEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
+    super(expression, factory);
+    
+    if(1 != subEvaluators.size()){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting one value but found %d",expression,subEvaluators.size()));
+    }
+  }
+
+  @Override
+  public Number evaluate(Tuple tuple) throws IOException {
+    
+    List<BigDecimal> results = evaluateAll(tuple);
+    
+    // we're still doing these checks because if we ever add an array-flatten evaluator, 
+    // one found in the constructor could become != 1
+    if(1 != results.size()){
+      throw new IOException(String.format(Locale.ROOT,"%s(...) only works with a 1 value but %d were provided", constructingFactory.getFunctionName(getClass()), results.size()));
+    }
+    
+    if(null == results.get(0)){
+      return null;
+    }
+    
+    return Math.acos(results.get(0).doubleValue());
+  }  
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ArcSineEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ArcSineEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ArcSineEvaluator.java
new file mode 100644
index 0000000..ed95165
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ArcSineEvaluator.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * 
+ */
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.util.List;
+import java.util.Locale;
+
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+public class ArcSineEvaluator extends NumberEvaluator {
+  protected static final long serialVersionUID = 1L;
+  
+  public ArcSineEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
+    super(expression, factory);
+    
+    if(1 != subEvaluators.size()){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting one value but found %d",expression,subEvaluators.size()));
+    }
+  }
+
+  @Override
+  public Number evaluate(Tuple tuple) throws IOException {
+    
+    List<BigDecimal> results = evaluateAll(tuple);
+    
+    // we're still doing these checks because if we ever add an array-flatten evaluator, 
+    // one found in the constructor could become != 1
+    if(1 != results.size()){
+      throw new IOException(String.format(Locale.ROOT,"%s(...) only works with a 1 value but %d were provided", constructingFactory.getFunctionName(getClass()), results.size()));
+    }
+    
+    if(null == results.get(0)){
+      return null;
+    }
+    
+    return Math.asin(results.get(0).doubleValue());
+  }  
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ArcTangentEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ArcTangentEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ArcTangentEvaluator.java
new file mode 100644
index 0000000..9325b41
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ArcTangentEvaluator.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * 
+ */
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.util.List;
+import java.util.Locale;
+
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+public class ArcTangentEvaluator extends NumberEvaluator {
+  protected static final long serialVersionUID = 1L;
+  
+  public ArcTangentEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
+    super(expression, factory);
+    
+    if(1 != subEvaluators.size()){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting one value but found %d",expression,subEvaluators.size()));
+    }
+  }
+
+  @Override
+  public Number evaluate(Tuple tuple) throws IOException {
+    
+    List<BigDecimal> results = evaluateAll(tuple);
+    
+    // we're still doing these checks because if we ever add an array-flatten evaluator, 
+    // one found in the constructor could become != 1
+    if(1 != results.size()){
+      throw new IOException(String.format(Locale.ROOT,"%s(...) only works with a 1 value but %d were provided", constructingFactory.getFunctionName(getClass()), results.size()));
+    }
+    
+    if(null == results.get(0)){
+      return null;
+    }
+    
+    return Math.atan(results.get(0).doubleValue());
+  }  
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/BooleanEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/BooleanEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/BooleanEvaluator.java
index f02f1fa..908562f 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/BooleanEvaluator.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/BooleanEvaluator.java
@@ -24,13 +24,11 @@ import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.solr.client.solrj.io.Tuple;
-import org.apache.solr.client.solrj.io.stream.StreamContext;
 import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
 import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
 
 public abstract class BooleanEvaluator extends ComplexEvaluator {
   protected static final long serialVersionUID = 1L;
-  protected StreamContext streamContext;
   
   public BooleanEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
     super(expression, factory);
@@ -48,11 +46,6 @@ public abstract class BooleanEvaluator extends ComplexEvaluator {
     return results;
   }
 
-  public void setStreamContext(StreamContext streamContext) {
-    this.streamContext = streamContext;
-  }
-
-
   public interface Checker {
     default boolean isNullAllowed(){
       return false;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/CeilingEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/CeilingEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/CeilingEvaluator.java
new file mode 100644
index 0000000..e2ccc8f
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/CeilingEvaluator.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ *
+ */
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.math.RoundingMode;
+import java.util.List;
+import java.util.Locale;
+
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+public class CeilingEvaluator extends NumberEvaluator {
+  protected static final long serialVersionUID = 1L;
+
+  public CeilingEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
+    super(expression, factory);
+
+    if(1 != subEvaluators.size()){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting one value but found %d",expression,subEvaluators.size()));
+    }
+  }
+
+  @Override
+  public Number evaluate(Tuple tuple) throws IOException {
+
+    List<BigDecimal> results = evaluateAll(tuple);
+
+    // we're still doing these checks because if we ever add an array-flatten evaluator,
+    // one found in the constructor could become != 1
+    if(1 != results.size()){
+      throw new IOException(String.format(Locale.ROOT,"%s(...) only works with a 1 value but %d were provided", constructingFactory.getFunctionName(getClass()), results.size()));
+    }
+
+    if(null == results.get(0)){
+      return null;
+    }
+
+    return normalizeType(results.get(0).setScale(0, RoundingMode.CEILING));
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/CoalesceEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/CoalesceEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/CoalesceEvaluator.java
new file mode 100644
index 0000000..8a6eda4
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/CoalesceEvaluator.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * 
+ */
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.util.Locale;
+
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+public class CoalesceEvaluator extends ComplexEvaluator {
+  protected static final long serialVersionUID = 1L;
+  
+  public CoalesceEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
+    super(expression, factory);
+    
+    if(subEvaluators.size() < 1){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting at least one value but found %d",expression,subEvaluators.size()));
+    }
+  }
+
+  @Override
+  public Object evaluate(Tuple tuple) throws IOException {
+    
+    for(StreamEvaluator evaluator : subEvaluators){
+      Object result = evaluator.evaluate(tuple);
+      if(null != result){
+        return result;
+      }
+    }
+        
+    return null;    
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ComplexEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ComplexEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ComplexEvaluator.java
index 1e56d12..59a4653 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ComplexEvaluator.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ComplexEvaluator.java
@@ -22,6 +22,7 @@ import java.util.List;
 import java.util.Locale;
 import java.util.UUID;
 
+import org.apache.solr.client.solrj.io.stream.StreamContext;
 import org.apache.solr.client.solrj.io.stream.expr.Explanation;
 import org.apache.solr.client.solrj.io.stream.expr.Explanation.ExpressionType;
 import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
@@ -31,6 +32,7 @@ import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
 
 public abstract class ComplexEvaluator implements StreamEvaluator {
   protected static final long serialVersionUID = 1L;
+  protected StreamContext streamContext;
   
   protected UUID nodeId = UUID.randomUUID();
   
@@ -96,4 +98,8 @@ public abstract class ComplexEvaluator implements StreamEvaluator {
       .withImplementingClass(getClass().getName())
       .withExpression(toExpression(factory).toString());
   }
+  
+  public void setStreamContext(StreamContext context) {
+    this.streamContext = context;
+  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ConditionalEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ConditionalEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ConditionalEvaluator.java
index 6126544..025bfae 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ConditionalEvaluator.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ConditionalEvaluator.java
@@ -24,13 +24,11 @@ import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.solr.client.solrj.io.Tuple;
-import org.apache.solr.client.solrj.io.stream.StreamContext;
 import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
 import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
 
 public abstract class ConditionalEvaluator extends ComplexEvaluator {
   protected static final long serialVersionUID = 1L;
-  protected StreamContext streamContext;
   
   public ConditionalEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
     super(expression, factory);
@@ -45,10 +43,6 @@ public abstract class ConditionalEvaluator extends ComplexEvaluator {
     return results;
   }
 
-  public void setStreamContext(StreamContext streamContext) {
-    this.streamContext = streamContext;
-  }
-  
   public interface Checker {
     default boolean isNullAllowed(){
       return false;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/CosineEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/CosineEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/CosineEvaluator.java
new file mode 100644
index 0000000..6adbb81
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/CosineEvaluator.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * 
+ */
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.util.List;
+import java.util.Locale;
+
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+public class CosineEvaluator extends NumberEvaluator {
+  protected static final long serialVersionUID = 1L;
+  
+  public CosineEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
+    super(expression, factory);
+    
+    if(1 != subEvaluators.size()){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting one value but found %d",expression,subEvaluators.size()));
+    }
+  }
+
+  @Override
+  public Number evaluate(Tuple tuple) throws IOException {
+    
+    List<BigDecimal> results = evaluateAll(tuple);
+    
+    // we're still doing these checks because if we ever add an array-flatten evaluator, 
+    // one found in the constructor could become != 1
+    if(1 != results.size()){
+      throw new IOException(String.format(Locale.ROOT,"%s(...) only works with a 1 value but %d were provided", constructingFactory.getFunctionName(getClass()), results.size()));
+    }
+    
+    if(null == results.get(0)){
+      return null;
+    }
+    
+    return Math.cos(results.get(0).doubleValue());
+  }  
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/CubedRootEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/CubedRootEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/CubedRootEvaluator.java
new file mode 100644
index 0000000..4cd9277
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/CubedRootEvaluator.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * 
+ */
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.util.List;
+import java.util.Locale;
+
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+public class CubedRootEvaluator extends NumberEvaluator {
+  protected static final long serialVersionUID = 1L;
+  
+  public CubedRootEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
+    super(expression, factory);
+    
+    if(1 != subEvaluators.size()){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting one value but found %d",expression,subEvaluators.size()));
+    }
+  }
+
+  @Override
+  public Number evaluate(Tuple tuple) throws IOException {
+    
+    List<BigDecimal> results = evaluateAll(tuple);
+    
+    // we're still doing these checks because if we ever add an array-flatten evaluator, 
+    // one found in the constructor could become != 1
+    if(1 != results.size()){
+      throw new IOException(String.format(Locale.ROOT,"%s(...) only works with a 1 value but %d were provided", constructingFactory.getFunctionName(getClass()), results.size()));
+    }
+    
+    if(null == results.get(0)){
+      return null;
+    }
+    
+    return Math.cbrt(results.get(0).doubleValue());
+  }  
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/FloorEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/FloorEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/FloorEvaluator.java
new file mode 100644
index 0000000..0191a8e
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/FloorEvaluator.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ *
+ */
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.math.RoundingMode;
+import java.util.List;
+import java.util.Locale;
+
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+public class FloorEvaluator extends NumberEvaluator {
+  protected static final long serialVersionUID = 1L;
+
+  public FloorEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
+    super(expression, factory);
+
+    if(1 != subEvaluators.size()){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting one value but found %d",expression,subEvaluators.size()));
+    }
+  }
+
+  @Override
+  public Number evaluate(Tuple tuple) throws IOException {
+
+    List<BigDecimal> results = evaluateAll(tuple);
+
+    // we're still doing these checks because if we ever add an array-flatten evaluator,
+    // one found in the constructor could become != 1
+    if(1 != results.size()){
+      throw new IOException(String.format(Locale.ROOT,"%s(...) only works with a 1 value but %d were provided", constructingFactory.getFunctionName(getClass()), results.size()));
+    }
+
+    if(null == results.get(0)){
+      return null;
+    }
+
+    return normalizeType(results.get(0).setScale(0, RoundingMode.FLOOR));
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/HyperbolicCosineEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/HyperbolicCosineEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/HyperbolicCosineEvaluator.java
new file mode 100644
index 0000000..4e973a4
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/HyperbolicCosineEvaluator.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * 
+ */
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.util.List;
+import java.util.Locale;
+
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+public class HyperbolicCosineEvaluator extends NumberEvaluator {
+  protected static final long serialVersionUID = 1L;
+  
+  public HyperbolicCosineEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
+    super(expression, factory);
+    
+    if(1 != subEvaluators.size()){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting one value but found %d",expression,subEvaluators.size()));
+    }
+  }
+
+  @Override
+  public Number evaluate(Tuple tuple) throws IOException {
+    
+    List<BigDecimal> results = evaluateAll(tuple);
+    
+    // we're still doing these checks because if we ever add an array-flatten evaluator, 
+    // one found in the constructor could become != 1
+    if(1 != results.size()){
+      throw new IOException(String.format(Locale.ROOT,"%s(...) only works with a 1 value but %d were provided", constructingFactory.getFunctionName(getClass()), results.size()));
+    }
+    
+    if(null == results.get(0)){
+      return null;
+    }
+    
+    return Math.cosh(results.get(0).doubleValue());
+  }  
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/HyperbolicSineEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/HyperbolicSineEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/HyperbolicSineEvaluator.java
new file mode 100644
index 0000000..5bf4a38
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/HyperbolicSineEvaluator.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * 
+ */
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.util.List;
+import java.util.Locale;
+
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+public class HyperbolicSineEvaluator extends NumberEvaluator {
+  protected static final long serialVersionUID = 1L;
+  
+  public HyperbolicSineEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
+    super(expression, factory);
+    
+    if(1 != subEvaluators.size()){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting one value but found %d",expression,subEvaluators.size()));
+    }
+  }
+
+  @Override
+  public Number evaluate(Tuple tuple) throws IOException {
+    
+    List<BigDecimal> results = evaluateAll(tuple);
+    
+    // we're still doing these checks because if we ever add an array-flatten evaluator, 
+    // one found in the constructor could become != 1
+    if(1 != results.size()){
+      throw new IOException(String.format(Locale.ROOT,"%s(...) only works with a 1 value but %d were provided", constructingFactory.getFunctionName(getClass()), results.size()));
+    }
+    
+    if(null == results.get(0)){
+      return null;
+    }
+    
+    return Math.sinh(results.get(0).doubleValue());
+  }  
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/HyperbolicTangentEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/HyperbolicTangentEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/HyperbolicTangentEvaluator.java
new file mode 100644
index 0000000..89aacd1
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/HyperbolicTangentEvaluator.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * 
+ */
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.util.List;
+import java.util.Locale;
+
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+public class HyperbolicTangentEvaluator extends NumberEvaluator {
+  protected static final long serialVersionUID = 1L;
+  
+  public HyperbolicTangentEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
+    super(expression, factory);
+    
+    if(1 != subEvaluators.size()){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting one value but found %d",expression,subEvaluators.size()));
+    }
+  }
+
+  @Override
+  public Number evaluate(Tuple tuple) throws IOException {
+    
+    List<BigDecimal> results = evaluateAll(tuple);
+    
+    // we're still doing these checks because if we ever add an array-flatten evaluator, 
+    // one found in the constructor could become != 1
+    if(1 != results.size()){
+      throw new IOException(String.format(Locale.ROOT,"%s(...) only works with a 1 value but %d were provided", constructingFactory.getFunctionName(getClass()), results.size()));
+    }
+    
+    if(null == results.get(0)){
+      return null;
+    }
+    
+    return Math.tanh(results.get(0).doubleValue());
+  }  
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ModuloEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ModuloEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ModuloEvaluator.java
new file mode 100644
index 0000000..928754b
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ModuloEvaluator.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * 
+ */
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.math.MathContext;
+import java.util.List;
+import java.util.Locale;
+
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+public class ModuloEvaluator extends NumberEvaluator {
+  protected static final long serialVersionUID = 1L;
+  
+  public ModuloEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
+    super(expression, factory);
+    
+    if(2 != subEvaluators.size()){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting two values but found %d",expression,subEvaluators.size()));
+    }
+  }
+
+  @Override
+  public Number evaluate(Tuple tuple) throws IOException {
+    
+    List<BigDecimal> results = evaluateAll(tuple);
+    
+    // we're still doing these checks because if we ever add an array-flatten evaluator, 
+    // two found in the constructor could become != 2
+    if(2 != results.size()){
+      String message = null;
+      if(1 == results.size()){
+        message = String.format(Locale.ROOT,"%s(...) only works with a 2 values (numerator,denominator) but 1 was provided", constructingFactory.getFunctionName(getClass())); 
+      }
+      else{
+        message = String.format(Locale.ROOT,"%s(...) only works with a 2 values (numerator,denominator) but %d were provided", constructingFactory.getFunctionName(getClass()), results.size());
+      }
+      throw new IOException(message);
+    }
+    
+    BigDecimal numerator = results.get(0);
+    BigDecimal denominator = results.get(1);
+    
+    if(null == numerator){
+      throw new IOException(String.format(Locale.ROOT,"Unable to %s(...) with a null numerator", constructingFactory.getFunctionName(getClass())));
+    }
+    
+    if(null == denominator){
+      throw new IOException(String.format(Locale.ROOT,"Unable to %s(...) with a null denominator", constructingFactory.getFunctionName(getClass())));
+    }
+    
+    if(0 == denominator.compareTo(BigDecimal.ZERO)){
+      throw new IOException(String.format(Locale.ROOT,"Unable to %s(...) with a 0 denominator", constructingFactory.getFunctionName(getClass())));
+    }
+    
+    return normalizeType(numerator.remainder(denominator, MathContext.DECIMAL64));
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/NumberEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/NumberEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/NumberEvaluator.java
index 283c7b1..f4491fd 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/NumberEvaluator.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/NumberEvaluator.java
@@ -26,13 +26,11 @@ import java.util.List;
 import java.util.Locale;
 
 import org.apache.solr.client.solrj.io.Tuple;
-import org.apache.solr.client.solrj.io.stream.StreamContext;
 import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
 import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
 
 public abstract class NumberEvaluator extends ComplexEvaluator {
   protected static final long serialVersionUID = 1L;
-  protected StreamContext streamContext;
   
   public NumberEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
     super(expression, factory);
@@ -40,10 +38,6 @@ public abstract class NumberEvaluator extends ComplexEvaluator {
   
   // restrict result to a Number
   public abstract Number evaluate(Tuple tuple) throws IOException;
-
-  public void setStreamContext(StreamContext context) {
-    this.streamContext = context;
-  }
   
   public List<BigDecimal> evaluateAll(final Tuple tuple) throws IOException {
     // evaluate each and confirm they are all either null or numeric

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/PowerEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/PowerEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/PowerEvaluator.java
new file mode 100644
index 0000000..a8245b6
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/PowerEvaluator.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * 
+ */
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.util.List;
+import java.util.Locale;
+
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+public class PowerEvaluator extends NumberEvaluator {
+  protected static final long serialVersionUID = 1L;
+  
+  public PowerEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
+    super(expression, factory);
+    
+    if(2 != subEvaluators.size()){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting exactly two values but found %d",expression,subEvaluators.size()));
+    }
+  }
+
+  @Override
+  public Number evaluate(Tuple tuple) throws IOException {
+    
+    List<BigDecimal> results = evaluateAll(tuple);
+    
+    if(results.stream().anyMatch(item -> null == item)){
+      return null;
+    }
+    
+    BigDecimal value = results.get(0);
+    BigDecimal exponent = results.get(1);
+    
+    double result = Math.pow(value.doubleValue(), exponent.doubleValue());
+    if(Double.isNaN(result)){
+      return result;
+    }
+    
+    return normalizeType(BigDecimal.valueOf(result));
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/RoundEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/RoundEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/RoundEvaluator.java
new file mode 100644
index 0000000..a34cdf4
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/RoundEvaluator.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * 
+ */
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.util.List;
+import java.util.Locale;
+
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+/**
+ * Streaming-expression evaluator rounding its single operand to the nearest
+ * long via Math.round, with null-propagation.
+ */
+public class RoundEvaluator extends NumberEvaluator {
+  protected static final long serialVersionUID = 1L;
+  
+  public RoundEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
+    super(expression, factory);
+    
+    // Exactly one operand is required; arity is validated at construction time.
+    if(1 != subEvaluators.size()){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting one value but found %d",expression,subEvaluators.size()));
+    }
+  }
+
+  @Override
+  public Number evaluate(Tuple tuple) throws IOException {
+    
+    List<BigDecimal> results = evaluateAll(tuple);
+    
+    // we're still doing these checks because if we ever add an array-flatten evaluator, 
+    // one found in the constructor could become != 1
+    if(1 != results.size()){
+      throw new IOException(String.format(Locale.ROOT,"%s(...) only works with a 1 value but %d were provided", constructingFactory.getFunctionName(getClass()), results.size()));
+    }
+    
+    // Null-propagation: a missing/null operand yields a null result.
+    if(null == results.get(0)){
+      return null;
+    }
+    
+    return Math.round(results.get(0).doubleValue());
+  }  
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/SineEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/SineEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/SineEvaluator.java
new file mode 100644
index 0000000..1e2fbb5
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/SineEvaluator.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * 
+ */
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.util.List;
+import java.util.Locale;
+
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+/**
+ * Streaming-expression evaluator computing the trigonometric sine of its
+ * single operand via Math.sin, with null-propagation.
+ */
+public class SineEvaluator extends NumberEvaluator {
+  protected static final long serialVersionUID = 1L;
+  
+  public SineEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
+    super(expression, factory);
+    
+    // Exactly one operand is required; arity is validated at construction time.
+    if(1 != subEvaluators.size()){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting one value but found %d",expression,subEvaluators.size()));
+    }
+  }
+
+  @Override
+  public Number evaluate(Tuple tuple) throws IOException {
+    
+    List<BigDecimal> results = evaluateAll(tuple);
+    
+    // we're still doing these checks because if we ever add an array-flatten evaluator, 
+    // one found in the constructor could become != 1
+    if(1 != results.size()){
+      throw new IOException(String.format(Locale.ROOT,"%s(...) only works with a 1 value but %d were provided", constructingFactory.getFunctionName(getClass()), results.size()));
+    }
+    
+    // Null-propagation: a missing/null operand yields a null result.
+    if(null == results.get(0)){
+      return null;
+    }
+    
+    return Math.sin(results.get(0).doubleValue());
+  }  
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/SquareRootEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/SquareRootEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/SquareRootEvaluator.java
new file mode 100644
index 0000000..74b9d81
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/SquareRootEvaluator.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * 
+ */
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.util.List;
+import java.util.Locale;
+
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+/**
+ * Streaming-expression evaluator computing the square root of its single
+ * operand via Math.sqrt, with null-propagation.
+ */
+public class SquareRootEvaluator extends NumberEvaluator {
+  protected static final long serialVersionUID = 1L;
+  
+  public SquareRootEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
+    super(expression, factory);
+    
+    // Exactly one operand is required; arity is validated at construction time.
+    if(1 != subEvaluators.size()){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting one value but found %d",expression,subEvaluators.size()));
+    }
+  }
+
+  @Override
+  public Number evaluate(Tuple tuple) throws IOException {
+    
+    List<BigDecimal> results = evaluateAll(tuple);
+    
+    // we're still doing these checks because if we ever add an array-flatten evaluator, 
+    // one found in the constructor could become != 1
+    if(1 != results.size()){
+      throw new IOException(String.format(Locale.ROOT,"%s(...) only works with a 1 value but %d were provided", constructingFactory.getFunctionName(getClass()), results.size()));
+    }
+    
+    // Null-propagation: a missing/null operand yields a null result.
+    // NOTE(review): a negative operand produces Double.NaN (Math.sqrt contract)
+    // and is returned as-is — confirm that is the intended behavior.
+    if(null == results.get(0)){
+      return null;
+    }
+    
+    return Math.sqrt(results.get(0).doubleValue());
+  }  
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TangentEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TangentEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TangentEvaluator.java
new file mode 100644
index 0000000..d2a0476
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/TangentEvaluator.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * 
+ */
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.util.List;
+import java.util.Locale;
+
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+/**
+ * Streaming-expression evaluator computing the trigonometric tangent of its
+ * single operand via Math.tan, with null-propagation.
+ */
+public class TangentEvaluator extends NumberEvaluator {
+  protected static final long serialVersionUID = 1L;
+  
+  public TangentEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
+    super(expression, factory);
+    
+    // Exactly one operand is required; arity is validated at construction time.
+    if(1 != subEvaluators.size()){
+      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expecting one value but found %d",expression,subEvaluators.size()));
+    }
+  }
+
+  @Override
+  public Number evaluate(Tuple tuple) throws IOException {
+    
+    List<BigDecimal> results = evaluateAll(tuple);
+    
+    // we're still doing these checks because if we ever add an array-flatten evaluator, 
+    // one found in the constructor could become != 1
+    if(1 != results.size()){
+      throw new IOException(String.format(Locale.ROOT,"%s(...) only works with a 1 value but %d were provided", constructingFactory.getFunctionName(getClass()), results.size()));
+    }
+    
+    // Null-propagation: a missing/null operand yields a null result.
+    if(null == results.get(0)){
+      return null;
+    }
+    
+    return Math.tan(results.get(0).doubleValue());
+  }  
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/ArcCosineEvaluatorTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/ArcCosineEvaluatorTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/ArcCosineEvaluatorTest.java
new file mode 100644
index 0000000..6a99a1c
--- /dev/null
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/ArcCosineEvaluatorTest.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.io.stream.eval;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.eval.ArcCosineEvaluator;
+import org.apache.solr.client.solrj.io.eval.StreamEvaluator;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+import org.junit.Test;
+
+import junit.framework.Assert;
+
+/**
+ * Unit tests for the acos() streaming-expression evaluator: verifies the
+ * result matches Math.acos, that wrong arity fails at construction time, and
+ * that missing/null inputs yield null.
+ */
+public class ArcCosineEvaluatorTest extends LuceneTestCase {
+
+  StreamFactory factory;
+  Map<String, Object> values;
+  
+  public ArcCosineEvaluatorTest() {
+    super();
+    
+    factory = new StreamFactory()
+      .withFunctionName("acos", ArcCosineEvaluator.class);
+    values = new HashMap<String,Object>();
+  }
+  
+  // Evaluates acos(a) with the given value bound to field "a"; the result must
+  // equal Math.acos(value), and a null input must produce a null result.
+  private void test(Double value) throws IOException{
+    StreamEvaluator evaluator = factory.constructEvaluator("acos(a)");
+    
+    values.clear();
+    values.put("a", value);
+    Object result = evaluator.evaluate(new Tuple(values));
+    
+    if(null == value){
+      Assert.assertNull(result);
+    }
+    else{
+      Assert.assertTrue(result instanceof Double);
+      Assert.assertEquals(Math.acos(value), result);
+    }
+  }
+    
+  @Test
+  public void oneField() throws Exception{
+    test(90D);
+    test(45D);
+    test(12.4D);
+    test(-45D);
+  }
+
+  // Arity errors must be raised when the expression is constructed.
+  @Test(expected = IOException.class)
+  public void noField() throws Exception{
+    factory.constructEvaluator("acos()");
+  }
+  
+  @Test(expected = IOException.class)
+  public void twoFields() throws Exception{
+    factory.constructEvaluator("acos(a,b)");
+  }
+  
+  // A tuple without field "a" at all must also evaluate to null.
+  @Test
+  public void noValue() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("acos(a)");
+    
+    values.clear();
+    Object result = evaluator.evaluate(new Tuple(values));
+    assertNull(result);
+  }
+  @Test
+  public void nullValue() throws Exception{
+    test(null);
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/ArcSineEvaluatorTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/ArcSineEvaluatorTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/ArcSineEvaluatorTest.java
new file mode 100644
index 0000000..79e934b
--- /dev/null
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/ArcSineEvaluatorTest.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.io.stream.eval;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.eval.ArcSineEvaluator;
+import org.apache.solr.client.solrj.io.eval.StreamEvaluator;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+import org.junit.Test;
+
+import junit.framework.Assert;
+
+/**
+ * Unit tests for the asin() streaming-expression evaluator: verifies the
+ * result matches Math.asin, that wrong arity fails at construction time, and
+ * that missing/null inputs yield null.
+ */
+public class ArcSineEvaluatorTest extends LuceneTestCase {
+
+  StreamFactory factory;
+  Map<String, Object> values;
+  
+  public ArcSineEvaluatorTest() {
+    super();
+    
+    factory = new StreamFactory()
+      .withFunctionName("asin", ArcSineEvaluator.class);
+    values = new HashMap<String,Object>();
+  }
+  
+  // Evaluates asin(a) with the given value bound to field "a"; the result must
+  // equal Math.asin(value), and a null input must produce a null result.
+  private void test(Double value) throws IOException{
+    StreamEvaluator evaluator = factory.constructEvaluator("asin(a)");
+    
+    values.clear();
+    values.put("a", value);
+    Object result = evaluator.evaluate(new Tuple(values));
+    
+    if(null == value){
+      Assert.assertNull(result);
+    }
+    else{
+      Assert.assertTrue(result instanceof Double);
+      Assert.assertEquals(Math.asin(value), result);
+    }
+  }
+    
+  @Test
+  public void oneField() throws Exception{
+    test(90D);
+    test(45D);
+    test(12.4D);
+    test(-45D);
+  }
+
+  // Arity errors must be raised when the expression is constructed.
+  @Test(expected = IOException.class)
+  public void noField() throws Exception{
+    factory.constructEvaluator("asin()");
+  }
+  
+  @Test(expected = IOException.class)
+  public void twoFields() throws Exception{
+    factory.constructEvaluator("asin(a,b)");
+  }
+  
+  // A tuple without field "a" at all must also evaluate to null.
+  @Test
+  public void noValue() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("asin(a)");
+    
+    values.clear();
+    Object result = evaluator.evaluate(new Tuple(values));
+    assertNull(result);
+  }
+  @Test
+  public void nullValue() throws Exception{
+    test(null);
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/ArcTangentEvaluatorTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/ArcTangentEvaluatorTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/ArcTangentEvaluatorTest.java
new file mode 100644
index 0000000..7af225b
--- /dev/null
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/ArcTangentEvaluatorTest.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.io.stream.eval;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.eval.ArcTangentEvaluator;
+import org.apache.solr.client.solrj.io.eval.StreamEvaluator;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+import org.junit.Test;
+
+import junit.framework.Assert;
+
+/**
+ * Unit tests for the atan() streaming-expression evaluator: verifies the
+ * result matches Math.atan, that wrong arity fails at construction time, and
+ * that missing/null inputs yield null.
+ */
+public class ArcTangentEvaluatorTest extends LuceneTestCase {
+
+  StreamFactory factory;
+  Map<String, Object> values;
+  
+  public ArcTangentEvaluatorTest() {
+    super();
+    
+    factory = new StreamFactory()
+      .withFunctionName("atan", ArcTangentEvaluator.class);
+    values = new HashMap<String,Object>();
+  }
+  
+  // Evaluates atan(a) with the given value bound to field "a"; the result must
+  // equal Math.atan(value), and a null input must produce a null result.
+  private void test(Double value) throws IOException{
+    StreamEvaluator evaluator = factory.constructEvaluator("atan(a)");
+    
+    values.clear();
+    values.put("a", value);
+    Object result = evaluator.evaluate(new Tuple(values));
+    
+    if(null == value){
+      Assert.assertNull(result);
+    }
+    else{
+      Assert.assertTrue(result instanceof Double);
+      Assert.assertEquals(Math.atan(value), result);
+    }
+  }
+    
+  @Test
+  public void oneField() throws Exception{
+    test(90D);
+    test(45D);
+    test(12.4D);
+    test(-45D);
+  }
+
+  // Arity errors must be raised when the expression is constructed.
+  @Test(expected = IOException.class)
+  public void noField() throws Exception{
+    factory.constructEvaluator("atan()");
+  }
+  
+  @Test(expected = IOException.class)
+  public void twoFields() throws Exception{
+    factory.constructEvaluator("atan(a,b)");
+  }
+  
+  // A tuple without field "a" at all must also evaluate to null.
+  @Test
+  public void noValue() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("atan(a)");
+    
+    values.clear();
+    Object result = evaluator.evaluate(new Tuple(values));
+    assertNull(result);
+  }
+  @Test
+  public void nullValue() throws Exception{
+    test(null);
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/CeilingEvaluatorTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/CeilingEvaluatorTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/CeilingEvaluatorTest.java
new file mode 100644
index 0000000..03395d2
--- /dev/null
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/CeilingEvaluatorTest.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.io.stream.eval;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.eval.CeilingEvaluator;
+import org.apache.solr.client.solrj.io.eval.StreamEvaluator;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+import org.junit.Test;
+
+import junit.framework.Assert;
+
+/**
+ * Unit tests for the ceil() streaming-expression evaluator: verifies integral
+ * and fractional inputs round up to a Long, that wrong arity fails at
+ * construction time, and that missing/null inputs yield null.
+ */
+public class CeilingEvaluatorTest extends LuceneTestCase {
+
+  StreamFactory factory;
+  Map<String, Object> values;
+  
+  public CeilingEvaluatorTest() {
+    super();
+    
+    factory = new StreamFactory()
+      .withFunctionName("ceil", CeilingEvaluator.class);
+    values = new HashMap<String,Object>();
+  }
+    
+  // ceil() is expected to normalize its result to a Long in all cases below.
+  @Test
+  public void ceilingOneField() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("ceil(a)");
+    Object result;
+    
+    values.clear();
+    values.put("a", 1);
+    result = evaluator.evaluate(new Tuple(values));
+    Assert.assertTrue(result instanceof Long);
+    Assert.assertEquals(1L, result);
+    
+    values.clear();
+    values.put("a", 1.1);
+    result = evaluator.evaluate(new Tuple(values));
+    Assert.assertTrue(result instanceof Long);
+    Assert.assertEquals(2L, result);
+    
+    // Ceiling of a negative fraction rounds toward zero.
+    values.clear();
+    values.put("a", -1.1);
+    result = evaluator.evaluate(new Tuple(values));
+    Assert.assertTrue(result instanceof Long);
+    Assert.assertEquals(-1L, result);
+  }
+
+  // Arity errors must be raised when the expression is constructed.
+  @Test(expected = IOException.class)
+  public void ceilNoField() throws Exception{
+    factory.constructEvaluator("ceil()");
+  }
+  
+  @Test(expected = IOException.class)
+  public void ceilTwoFields() throws Exception{
+    factory.constructEvaluator("ceil(a,b)");
+  }
+  
+  // Both an absent field and an explicitly-null field must evaluate to null.
+  @Test
+  public void ceilNoValue() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("ceil(a)");
+    
+    values.clear();
+    Object result = evaluator.evaluate(new Tuple(values));
+    assertNull(result);
+  }
+  @Test
+  public void ceilNullValue() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("ceil(a)");
+    
+    values.clear();
+    values.put("a", null);
+    Object result = evaluator.evaluate(new Tuple(values));
+    assertNull(result);
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/CoalesceEvaluatorTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/CoalesceEvaluatorTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/CoalesceEvaluatorTest.java
new file mode 100644
index 0000000..79f46e7
--- /dev/null
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/CoalesceEvaluatorTest.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.io.stream.eval;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.eval.CoalesceEvaluator;
+import org.apache.solr.client.solrj.io.eval.StreamEvaluator;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+import org.junit.Test;
+
+import junit.framework.Assert;
+
+/**
+ * Unit tests for the coalesce() streaming-expression evaluator: verifies it
+ * returns the first non-null operand (of any type), returns null when all
+ * operands are null or missing, and supports nested coalesce expressions.
+ */
+public class CoalesceEvaluatorTest extends LuceneTestCase {
+
+  StreamFactory factory;
+  Map<String, Object> values;
+  
+  public CoalesceEvaluatorTest() {
+    super();
+    
+    factory = new StreamFactory()
+      .withFunctionName("coalesce", CoalesceEvaluator.class);
+    values = new HashMap<String,Object>();
+  }
+    
+  // The first non-null value wins regardless of type (numeric, string, boolean).
+  @Test
+  public void twoFieldsWithValues() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("coalesce(a,b)");
+    Object result;
+    
+    values.clear();
+    values.put("a", null);
+    values.put("b", 2);
+    result = evaluator.evaluate(new Tuple(values));
+    Assert.assertEquals(2, result);
+    
+    values.clear();
+    values.put("a", 1.1);
+    values.put("b", null);
+    result = evaluator.evaluate(new Tuple(values));
+    Assert.assertEquals(1.1D, result);
+    
+    values.clear();
+    values.put("a", "foo");
+    values.put("b", 2.1);
+    result = evaluator.evaluate(new Tuple(values));
+    Assert.assertEquals("foo", result);
+
+    values.clear();
+    values.put("a", true);
+    values.put("b", 2.1);
+    result = evaluator.evaluate(new Tuple(values));
+    Assert.assertEquals(true, result);
+    
+
+    // Boolean false is a legitimate non-null value and must be returned.
+    values.clear();
+    values.put("a", null);
+    values.put("b", false);
+    result = evaluator.evaluate(new Tuple(values));
+    Assert.assertEquals(false, result);
+
+    values.clear();
+    values.put("a", null);
+    values.put("b", null);
+    result = evaluator.evaluate(new Tuple(values));
+    Assert.assertNull(result);
+  }
+  
+
+  // Fields absent from the tuple behave the same as explicit nulls.
+  @Test
+  public void twoFieldsWithMissingField() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("coalesce(a,b)");
+    Object result;
+    
+    values.clear();
+    result = evaluator.evaluate(new Tuple(values));
+    Assert.assertNull(result);
+    
+  }
+  
+  // coalesce() expressions may be nested as operands of an outer coalesce().
+  @Test
+  public void manyFieldsWithSubcoalesces() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("coalesce(a,b,coalesce(c,d))");
+    Object result;
+    
+    values.clear();
+    values.put("a", 1);
+    values.put("b", null);
+    values.put("c", null);
+    values.put("d", 4);
+    result = evaluator.evaluate(new Tuple(values));
+    Assert.assertEquals(1, result);
+  }
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/674ce4e8/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/CosineEvaluatorTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/CosineEvaluatorTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/CosineEvaluatorTest.java
new file mode 100644
index 0000000..6bb6913
--- /dev/null
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/CosineEvaluatorTest.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.io.stream.eval;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.eval.CosineEvaluator;
+import org.apache.solr.client.solrj.io.eval.StreamEvaluator;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+import org.junit.Test;
+
+import junit.framework.Assert;
+
+/**
+ * Unit tests for the cos() streaming-expression evaluator: verifies the
+ * result matches Math.cos, that wrong arity fails at construction time, and
+ * that missing/null inputs yield null.
+ */
+public class CosineEvaluatorTest extends LuceneTestCase {
+
+  StreamFactory factory;
+  Map<String, Object> values;
+  
+  public CosineEvaluatorTest() {
+    super();
+    
+    factory = new StreamFactory()
+      .withFunctionName("cos", CosineEvaluator.class);
+    values = new HashMap<String,Object>();
+  }
+  
+  // Evaluates cos(a) with the given value bound to field "a"; the result must
+  // equal Math.cos(value), and a null input must produce a null result.
+  private void test(Double value) throws IOException{
+    StreamEvaluator evaluator = factory.constructEvaluator("cos(a)");
+    
+    values.clear();
+    values.put("a", value);
+    Object result = evaluator.evaluate(new Tuple(values));
+    
+    if(null == value){
+      Assert.assertNull(result);
+    }
+    else{
+      Assert.assertTrue(result instanceof Double);
+      Assert.assertEquals(Math.cos(value), result);
+    }
+  }
+    
+  @Test
+  public void oneField() throws Exception{
+    test(90D);
+    test(45D);
+    test(12.4D);
+    test(-45D);
+  }
+
+  // Arity errors must be raised when the expression is constructed.
+  @Test(expected = IOException.class)
+  public void noField() throws Exception{
+    factory.constructEvaluator("cos()");
+  }
+  
+  @Test(expected = IOException.class)
+  public void twoFields() throws Exception{
+    factory.constructEvaluator("cos(a,b)");
+  }
+  
+  // A tuple without field "a" at all must also evaluate to null.
+  @Test
+  public void noValue() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("cos(a)");
+    
+    values.clear();
+    Object result = evaluator.evaluate(new Tuple(values));
+    assertNull(result);
+  }
+  @Test
+  public void nullValue() throws Exception{
+    test(null);
+  }
+}


[38/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-7383: Replace DIH 'rss' example with 'atom' rss example was broken for multiple reasons. atom example showcases the same - and more - features and uses the smallest config file needed to make it work.

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/mapping-FoldToASCII.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/mapping-FoldToASCII.txt b/solr/example/example-DIH/solr/rss/conf/mapping-FoldToASCII.txt
deleted file mode 100644
index 9a84b6e..0000000
--- a/solr/example/example-DIH/solr/rss/conf/mapping-FoldToASCII.txt
+++ /dev/null
@@ -1,3813 +0,0 @@
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# This map converts alphabetic, numeric, and symbolic Unicode characters
-# which are not in the first 127 ASCII characters (the "Basic Latin" Unicode
-# block) into their ASCII equivalents, if one exists.
-#
-# Characters from the following Unicode blocks are converted; however, only
-# those characters with reasonable ASCII alternatives are converted:
-#
-# - C1 Controls and Latin-1 Supplement: http://www.unicode.org/charts/PDF/U0080.pdf
-# - Latin Extended-A: http://www.unicode.org/charts/PDF/U0100.pdf
-# - Latin Extended-B: http://www.unicode.org/charts/PDF/U0180.pdf
-# - Latin Extended Additional: http://www.unicode.org/charts/PDF/U1E00.pdf
-# - Latin Extended-C: http://www.unicode.org/charts/PDF/U2C60.pdf
-# - Latin Extended-D: http://www.unicode.org/charts/PDF/UA720.pdf
-# - IPA Extensions: http://www.unicode.org/charts/PDF/U0250.pdf
-# - Phonetic Extensions: http://www.unicode.org/charts/PDF/U1D00.pdf
-# - Phonetic Extensions Supplement: http://www.unicode.org/charts/PDF/U1D80.pdf
-# - General Punctuation: http://www.unicode.org/charts/PDF/U2000.pdf
-# - Superscripts and Subscripts: http://www.unicode.org/charts/PDF/U2070.pdf
-# - Enclosed Alphanumerics: http://www.unicode.org/charts/PDF/U2460.pdf
-# - Dingbats: http://www.unicode.org/charts/PDF/U2700.pdf
-# - Supplemental Punctuation: http://www.unicode.org/charts/PDF/U2E00.pdf
-# - Alphabetic Presentation Forms: http://www.unicode.org/charts/PDF/UFB00.pdf
-# - Halfwidth and Fullwidth Forms: http://www.unicode.org/charts/PDF/UFF00.pdf
-#  
-# See: http://en.wikipedia.org/wiki/Latin_characters_in_Unicode
-#
-# The set of character conversions supported by this map is a superset of
-# those supported by the map represented by mapping-ISOLatin1Accent.txt.
-#
-# See the bottom of this file for the Perl script used to generate the contents
-# of this file (without this header) from ASCIIFoldingFilter.java.
-
-
-# Syntax:
-#   "source" => "target"
-#     "source".length() > 0 (source cannot be empty.)
-#     "target".length() >= 0 (target can be empty.)
-
-
-# À  [LATIN CAPITAL LETTER A WITH GRAVE]
-"\u00C0" => "A"
-
-# Á  [LATIN CAPITAL LETTER A WITH ACUTE]
-"\u00C1" => "A"
-
-# Â  [LATIN CAPITAL LETTER A WITH CIRCUMFLEX]
-"\u00C2" => "A"
-
-# Ã  [LATIN CAPITAL LETTER A WITH TILDE]
-"\u00C3" => "A"
-
-# Ä  [LATIN CAPITAL LETTER A WITH DIAERESIS]
-"\u00C4" => "A"
-
-# Å  [LATIN CAPITAL LETTER A WITH RING ABOVE]
-"\u00C5" => "A"
-
-# \u0100  [LATIN CAPITAL LETTER A WITH MACRON]
-"\u0100" => "A"
-
-# \u0102  [LATIN CAPITAL LETTER A WITH BREVE]
-"\u0102" => "A"
-
-# \u0104  [LATIN CAPITAL LETTER A WITH OGONEK]
-"\u0104" => "A"
-
-# \u018f  http://en.wikipedia.org/wiki/Schwa  [LATIN CAPITAL LETTER SCHWA]
-"\u018F" => "A"
-
-# \u01cd  [LATIN CAPITAL LETTER A WITH CARON]
-"\u01CD" => "A"
-
-# \u01de  [LATIN CAPITAL LETTER A WITH DIAERESIS AND MACRON]
-"\u01DE" => "A"
-
-# \u01e0  [LATIN CAPITAL LETTER A WITH DOT ABOVE AND MACRON]
-"\u01E0" => "A"
-
-# \u01fa  [LATIN CAPITAL LETTER A WITH RING ABOVE AND ACUTE]
-"\u01FA" => "A"
-
-# \u0200  [LATIN CAPITAL LETTER A WITH DOUBLE GRAVE]
-"\u0200" => "A"
-
-# \u0202  [LATIN CAPITAL LETTER A WITH INVERTED BREVE]
-"\u0202" => "A"
-
-# \u0226  [LATIN CAPITAL LETTER A WITH DOT ABOVE]
-"\u0226" => "A"
-
-# \u023a  [LATIN CAPITAL LETTER A WITH STROKE]
-"\u023A" => "A"
-
-# \u1d00  [LATIN LETTER SMALL CAPITAL A]
-"\u1D00" => "A"
-
-# \u1e00  [LATIN CAPITAL LETTER A WITH RING BELOW]
-"\u1E00" => "A"
-
-# \u1ea0  [LATIN CAPITAL LETTER A WITH DOT BELOW]
-"\u1EA0" => "A"
-
-# \u1ea2  [LATIN CAPITAL LETTER A WITH HOOK ABOVE]
-"\u1EA2" => "A"
-
-# \u1ea4  [LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND ACUTE]
-"\u1EA4" => "A"
-
-# \u1ea6  [LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND GRAVE]
-"\u1EA6" => "A"
-
-# \u1ea8  [LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND HOOK ABOVE]
-"\u1EA8" => "A"
-
-# \u1eaa  [LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND TILDE]
-"\u1EAA" => "A"
-
-# \u1eac  [LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND DOT BELOW]
-"\u1EAC" => "A"
-
-# \u1eae  [LATIN CAPITAL LETTER A WITH BREVE AND ACUTE]
-"\u1EAE" => "A"
-
-# \u1eb0  [LATIN CAPITAL LETTER A WITH BREVE AND GRAVE]
-"\u1EB0" => "A"
-
-# \u1eb2  [LATIN CAPITAL LETTER A WITH BREVE AND HOOK ABOVE]
-"\u1EB2" => "A"
-
-# \u1eb4  [LATIN CAPITAL LETTER A WITH BREVE AND TILDE]
-"\u1EB4" => "A"
-
-# \u1eb6  [LATIN CAPITAL LETTER A WITH BREVE AND DOT BELOW]
-"\u1EB6" => "A"
-
-# \u24b6  [CIRCLED LATIN CAPITAL LETTER A]
-"\u24B6" => "A"
-
-# \uff21  [FULLWIDTH LATIN CAPITAL LETTER A]
-"\uFF21" => "A"
-
-# à  [LATIN SMALL LETTER A WITH GRAVE]
-"\u00E0" => "a"
-
-# á  [LATIN SMALL LETTER A WITH ACUTE]
-"\u00E1" => "a"
-
-# â  [LATIN SMALL LETTER A WITH CIRCUMFLEX]
-"\u00E2" => "a"
-
-# ã  [LATIN SMALL LETTER A WITH TILDE]
-"\u00E3" => "a"
-
-# ä  [LATIN SMALL LETTER A WITH DIAERESIS]
-"\u00E4" => "a"
-
-# å  [LATIN SMALL LETTER A WITH RING ABOVE]
-"\u00E5" => "a"
-
-# \u0101  [LATIN SMALL LETTER A WITH MACRON]
-"\u0101" => "a"
-
-# \u0103  [LATIN SMALL LETTER A WITH BREVE]
-"\u0103" => "a"
-
-# \u0105  [LATIN SMALL LETTER A WITH OGONEK]
-"\u0105" => "a"
-
-# \u01ce  [LATIN SMALL LETTER A WITH CARON]
-"\u01CE" => "a"
-
-# \u01df  [LATIN SMALL LETTER A WITH DIAERESIS AND MACRON]
-"\u01DF" => "a"
-
-# \u01e1  [LATIN SMALL LETTER A WITH DOT ABOVE AND MACRON]
-"\u01E1" => "a"
-
-# \u01fb  [LATIN SMALL LETTER A WITH RING ABOVE AND ACUTE]
-"\u01FB" => "a"
-
-# \u0201  [LATIN SMALL LETTER A WITH DOUBLE GRAVE]
-"\u0201" => "a"
-
-# \u0203  [LATIN SMALL LETTER A WITH INVERTED BREVE]
-"\u0203" => "a"
-
-# \u0227  [LATIN SMALL LETTER A WITH DOT ABOVE]
-"\u0227" => "a"
-
-# \u0250  [LATIN SMALL LETTER TURNED A]
-"\u0250" => "a"
-
-# \u0259  [LATIN SMALL LETTER SCHWA]
-"\u0259" => "a"
-
-# \u025a  [LATIN SMALL LETTER SCHWA WITH HOOK]
-"\u025A" => "a"
-
-# \u1d8f  [LATIN SMALL LETTER A WITH RETROFLEX HOOK]
-"\u1D8F" => "a"
-
-# \u1d95  [LATIN SMALL LETTER SCHWA WITH RETROFLEX HOOK]
-"\u1D95" => "a"
-
-# \u1e01  [LATIN SMALL LETTER A WITH RING BELOW]
-"\u1E01" => "a"
-
-# \u1e9a  [LATIN SMALL LETTER A WITH RIGHT HALF RING]
-"\u1E9A" => "a"
-
-# \u1ea1  [LATIN SMALL LETTER A WITH DOT BELOW]
-"\u1EA1" => "a"
-
-# \u1ea3  [LATIN SMALL LETTER A WITH HOOK ABOVE]
-"\u1EA3" => "a"
-
-# \u1ea5  [LATIN SMALL LETTER A WITH CIRCUMFLEX AND ACUTE]
-"\u1EA5" => "a"
-
-# \u1ea7  [LATIN SMALL LETTER A WITH CIRCUMFLEX AND GRAVE]
-"\u1EA7" => "a"
-
-# \u1ea9  [LATIN SMALL LETTER A WITH CIRCUMFLEX AND HOOK ABOVE]
-"\u1EA9" => "a"
-
-# \u1eab  [LATIN SMALL LETTER A WITH CIRCUMFLEX AND TILDE]
-"\u1EAB" => "a"
-
-# \u1ead  [LATIN SMALL LETTER A WITH CIRCUMFLEX AND DOT BELOW]
-"\u1EAD" => "a"
-
-# \u1eaf  [LATIN SMALL LETTER A WITH BREVE AND ACUTE]
-"\u1EAF" => "a"
-
-# \u1eb1  [LATIN SMALL LETTER A WITH BREVE AND GRAVE]
-"\u1EB1" => "a"
-
-# \u1eb3  [LATIN SMALL LETTER A WITH BREVE AND HOOK ABOVE]
-"\u1EB3" => "a"
-
-# \u1eb5  [LATIN SMALL LETTER A WITH BREVE AND TILDE]
-"\u1EB5" => "a"
-
-# \u1eb7  [LATIN SMALL LETTER A WITH BREVE AND DOT BELOW]
-"\u1EB7" => "a"
-
-# \u2090  [LATIN SUBSCRIPT SMALL LETTER A]
-"\u2090" => "a"
-
-# \u2094  [LATIN SUBSCRIPT SMALL LETTER SCHWA]
-"\u2094" => "a"
-
-# \u24d0  [CIRCLED LATIN SMALL LETTER A]
-"\u24D0" => "a"
-
-# \u2c65  [LATIN SMALL LETTER A WITH STROKE]
-"\u2C65" => "a"
-
-# \u2c6f  [LATIN CAPITAL LETTER TURNED A]
-"\u2C6F" => "a"
-
-# \uff41  [FULLWIDTH LATIN SMALL LETTER A]
-"\uFF41" => "a"
-
-# \ua732  [LATIN CAPITAL LETTER AA]
-"\uA732" => "AA"
-
-# Æ  [LATIN CAPITAL LETTER AE]
-"\u00C6" => "AE"
-
-# \u01e2  [LATIN CAPITAL LETTER AE WITH MACRON]
-"\u01E2" => "AE"
-
-# \u01fc  [LATIN CAPITAL LETTER AE WITH ACUTE]
-"\u01FC" => "AE"
-
-# \u1d01  [LATIN LETTER SMALL CAPITAL AE]
-"\u1D01" => "AE"
-
-# \ua734  [LATIN CAPITAL LETTER AO]
-"\uA734" => "AO"
-
-# \ua736  [LATIN CAPITAL LETTER AU]
-"\uA736" => "AU"
-
-# \ua738  [LATIN CAPITAL LETTER AV]
-"\uA738" => "AV"
-
-# \ua73a  [LATIN CAPITAL LETTER AV WITH HORIZONTAL BAR]
-"\uA73A" => "AV"
-
-# \ua73c  [LATIN CAPITAL LETTER AY]
-"\uA73C" => "AY"
-
-# \u249c  [PARENTHESIZED LATIN SMALL LETTER A]
-"\u249C" => "(a)"
-
-# \ua733  [LATIN SMALL LETTER AA]
-"\uA733" => "aa"
-
-# æ  [LATIN SMALL LETTER AE]
-"\u00E6" => "ae"
-
-# \u01e3  [LATIN SMALL LETTER AE WITH MACRON]
-"\u01E3" => "ae"
-
-# \u01fd  [LATIN SMALL LETTER AE WITH ACUTE]
-"\u01FD" => "ae"
-
-# \u1d02  [LATIN SMALL LETTER TURNED AE]
-"\u1D02" => "ae"
-
-# \ua735  [LATIN SMALL LETTER AO]
-"\uA735" => "ao"
-
-# \ua737  [LATIN SMALL LETTER AU]
-"\uA737" => "au"
-
-# \ua739  [LATIN SMALL LETTER AV]
-"\uA739" => "av"
-
-# \ua73b  [LATIN SMALL LETTER AV WITH HORIZONTAL BAR]
-"\uA73B" => "av"
-
-# \ua73d  [LATIN SMALL LETTER AY]
-"\uA73D" => "ay"
-
-# \u0181  [LATIN CAPITAL LETTER B WITH HOOK]
-"\u0181" => "B"
-
-# \u0182  [LATIN CAPITAL LETTER B WITH TOPBAR]
-"\u0182" => "B"
-
-# \u0243  [LATIN CAPITAL LETTER B WITH STROKE]
-"\u0243" => "B"
-
-# \u0299  [LATIN LETTER SMALL CAPITAL B]
-"\u0299" => "B"
-
-# \u1d03  [LATIN LETTER SMALL CAPITAL BARRED B]
-"\u1D03" => "B"
-
-# \u1e02  [LATIN CAPITAL LETTER B WITH DOT ABOVE]
-"\u1E02" => "B"
-
-# \u1e04  [LATIN CAPITAL LETTER B WITH DOT BELOW]
-"\u1E04" => "B"
-
-# \u1e06  [LATIN CAPITAL LETTER B WITH LINE BELOW]
-"\u1E06" => "B"
-
-# \u24b7  [CIRCLED LATIN CAPITAL LETTER B]
-"\u24B7" => "B"
-
-# \uff22  [FULLWIDTH LATIN CAPITAL LETTER B]
-"\uFF22" => "B"
-
-# \u0180  [LATIN SMALL LETTER B WITH STROKE]
-"\u0180" => "b"
-
-# \u0183  [LATIN SMALL LETTER B WITH TOPBAR]
-"\u0183" => "b"
-
-# \u0253  [LATIN SMALL LETTER B WITH HOOK]
-"\u0253" => "b"
-
-# \u1d6c  [LATIN SMALL LETTER B WITH MIDDLE TILDE]
-"\u1D6C" => "b"
-
-# \u1d80  [LATIN SMALL LETTER B WITH PALATAL HOOK]
-"\u1D80" => "b"
-
-# \u1e03  [LATIN SMALL LETTER B WITH DOT ABOVE]
-"\u1E03" => "b"
-
-# \u1e05  [LATIN SMALL LETTER B WITH DOT BELOW]
-"\u1E05" => "b"
-
-# \u1e07  [LATIN SMALL LETTER B WITH LINE BELOW]
-"\u1E07" => "b"
-
-# \u24d1  [CIRCLED LATIN SMALL LETTER B]
-"\u24D1" => "b"
-
-# \uff42  [FULLWIDTH LATIN SMALL LETTER B]
-"\uFF42" => "b"
-
-# \u249d  [PARENTHESIZED LATIN SMALL LETTER B]
-"\u249D" => "(b)"
-
-# Ç  [LATIN CAPITAL LETTER C WITH CEDILLA]
-"\u00C7" => "C"
-
-# \u0106  [LATIN CAPITAL LETTER C WITH ACUTE]
-"\u0106" => "C"
-
-# \u0108  [LATIN CAPITAL LETTER C WITH CIRCUMFLEX]
-"\u0108" => "C"
-
-# \u010a  [LATIN CAPITAL LETTER C WITH DOT ABOVE]
-"\u010A" => "C"
-
-# \u010c  [LATIN CAPITAL LETTER C WITH CARON]
-"\u010C" => "C"
-
-# \u0187  [LATIN CAPITAL LETTER C WITH HOOK]
-"\u0187" => "C"
-
-# \u023b  [LATIN CAPITAL LETTER C WITH STROKE]
-"\u023B" => "C"
-
-# \u0297  [LATIN LETTER STRETCHED C]
-"\u0297" => "C"
-
-# \u1d04  [LATIN LETTER SMALL CAPITAL C]
-"\u1D04" => "C"
-
-# \u1e08  [LATIN CAPITAL LETTER C WITH CEDILLA AND ACUTE]
-"\u1E08" => "C"
-
-# \u24b8  [CIRCLED LATIN CAPITAL LETTER C]
-"\u24B8" => "C"
-
-# \uff23  [FULLWIDTH LATIN CAPITAL LETTER C]
-"\uFF23" => "C"
-
-# ç  [LATIN SMALL LETTER C WITH CEDILLA]
-"\u00E7" => "c"
-
-# \u0107  [LATIN SMALL LETTER C WITH ACUTE]
-"\u0107" => "c"
-
-# \u0109  [LATIN SMALL LETTER C WITH CIRCUMFLEX]
-"\u0109" => "c"
-
-# \u010b  [LATIN SMALL LETTER C WITH DOT ABOVE]
-"\u010B" => "c"
-
-# \u010d  [LATIN SMALL LETTER C WITH CARON]
-"\u010D" => "c"
-
-# \u0188  [LATIN SMALL LETTER C WITH HOOK]
-"\u0188" => "c"
-
-# \u023c  [LATIN SMALL LETTER C WITH STROKE]
-"\u023C" => "c"
-
-# \u0255  [LATIN SMALL LETTER C WITH CURL]
-"\u0255" => "c"
-
-# \u1e09  [LATIN SMALL LETTER C WITH CEDILLA AND ACUTE]
-"\u1E09" => "c"
-
-# \u2184  [LATIN SMALL LETTER REVERSED C]
-"\u2184" => "c"
-
-# \u24d2  [CIRCLED LATIN SMALL LETTER C]
-"\u24D2" => "c"
-
-# \ua73e  [LATIN CAPITAL LETTER REVERSED C WITH DOT]
-"\uA73E" => "c"
-
-# \ua73f  [LATIN SMALL LETTER REVERSED C WITH DOT]
-"\uA73F" => "c"
-
-# \uff43  [FULLWIDTH LATIN SMALL LETTER C]
-"\uFF43" => "c"
-
-# \u249e  [PARENTHESIZED LATIN SMALL LETTER C]
-"\u249E" => "(c)"
-
-# Ð  [LATIN CAPITAL LETTER ETH]
-"\u00D0" => "D"
-
-# \u010e  [LATIN CAPITAL LETTER D WITH CARON]
-"\u010E" => "D"
-
-# \u0110  [LATIN CAPITAL LETTER D WITH STROKE]
-"\u0110" => "D"
-
-# \u0189  [LATIN CAPITAL LETTER AFRICAN D]
-"\u0189" => "D"
-
-# \u018a  [LATIN CAPITAL LETTER D WITH HOOK]
-"\u018A" => "D"
-
-# \u018b  [LATIN CAPITAL LETTER D WITH TOPBAR]
-"\u018B" => "D"
-
-# \u1d05  [LATIN LETTER SMALL CAPITAL D]
-"\u1D05" => "D"
-
-# \u1d06  [LATIN LETTER SMALL CAPITAL ETH]
-"\u1D06" => "D"
-
-# \u1e0a  [LATIN CAPITAL LETTER D WITH DOT ABOVE]
-"\u1E0A" => "D"
-
-# \u1e0c  [LATIN CAPITAL LETTER D WITH DOT BELOW]
-"\u1E0C" => "D"
-
-# \u1e0e  [LATIN CAPITAL LETTER D WITH LINE BELOW]
-"\u1E0E" => "D"
-
-# \u1e10  [LATIN CAPITAL LETTER D WITH CEDILLA]
-"\u1E10" => "D"
-
-# \u1e12  [LATIN CAPITAL LETTER D WITH CIRCUMFLEX BELOW]
-"\u1E12" => "D"
-
-# \u24b9  [CIRCLED LATIN CAPITAL LETTER D]
-"\u24B9" => "D"
-
-# \ua779  [LATIN CAPITAL LETTER INSULAR D]
-"\uA779" => "D"
-
-# \uff24  [FULLWIDTH LATIN CAPITAL LETTER D]
-"\uFF24" => "D"
-
-# ð  [LATIN SMALL LETTER ETH]
-"\u00F0" => "d"
-
-# \u010f  [LATIN SMALL LETTER D WITH CARON]
-"\u010F" => "d"
-
-# \u0111  [LATIN SMALL LETTER D WITH STROKE]
-"\u0111" => "d"
-
-# \u018c  [LATIN SMALL LETTER D WITH TOPBAR]
-"\u018C" => "d"
-
-# \u0221  [LATIN SMALL LETTER D WITH CURL]
-"\u0221" => "d"
-
-# \u0256  [LATIN SMALL LETTER D WITH TAIL]
-"\u0256" => "d"
-
-# \u0257  [LATIN SMALL LETTER D WITH HOOK]
-"\u0257" => "d"
-
-# \u1d6d  [LATIN SMALL LETTER D WITH MIDDLE TILDE]
-"\u1D6D" => "d"
-
-# \u1d81  [LATIN SMALL LETTER D WITH PALATAL HOOK]
-"\u1D81" => "d"
-
-# \u1d91  [LATIN SMALL LETTER D WITH HOOK AND TAIL]
-"\u1D91" => "d"
-
-# \u1e0b  [LATIN SMALL LETTER D WITH DOT ABOVE]
-"\u1E0B" => "d"
-
-# \u1e0d  [LATIN SMALL LETTER D WITH DOT BELOW]
-"\u1E0D" => "d"
-
-# \u1e0f  [LATIN SMALL LETTER D WITH LINE BELOW]
-"\u1E0F" => "d"
-
-# \u1e11  [LATIN SMALL LETTER D WITH CEDILLA]
-"\u1E11" => "d"
-
-# \u1e13  [LATIN SMALL LETTER D WITH CIRCUMFLEX BELOW]
-"\u1E13" => "d"
-
-# \u24d3  [CIRCLED LATIN SMALL LETTER D]
-"\u24D3" => "d"
-
-# \ua77a  [LATIN SMALL LETTER INSULAR D]
-"\uA77A" => "d"
-
-# \uff44  [FULLWIDTH LATIN SMALL LETTER D]
-"\uFF44" => "d"
-
-# \u01c4  [LATIN CAPITAL LETTER DZ WITH CARON]
-"\u01C4" => "DZ"
-
-# \u01f1  [LATIN CAPITAL LETTER DZ]
-"\u01F1" => "DZ"
-
-# \u01c5  [LATIN CAPITAL LETTER D WITH SMALL LETTER Z WITH CARON]
-"\u01C5" => "Dz"
-
-# \u01f2  [LATIN CAPITAL LETTER D WITH SMALL LETTER Z]
-"\u01F2" => "Dz"
-
-# \u249f  [PARENTHESIZED LATIN SMALL LETTER D]
-"\u249F" => "(d)"
-
-# \u0238  [LATIN SMALL LETTER DB DIGRAPH]
-"\u0238" => "db"
-
-# \u01c6  [LATIN SMALL LETTER DZ WITH CARON]
-"\u01C6" => "dz"
-
-# \u01f3  [LATIN SMALL LETTER DZ]
-"\u01F3" => "dz"
-
-# \u02a3  [LATIN SMALL LETTER DZ DIGRAPH]
-"\u02A3" => "dz"
-
-# \u02a5  [LATIN SMALL LETTER DZ DIGRAPH WITH CURL]
-"\u02A5" => "dz"
-
-# È  [LATIN CAPITAL LETTER E WITH GRAVE]
-"\u00C8" => "E"
-
-# É  [LATIN CAPITAL LETTER E WITH ACUTE]
-"\u00C9" => "E"
-
-# Ê  [LATIN CAPITAL LETTER E WITH CIRCUMFLEX]
-"\u00CA" => "E"
-
-# Ë  [LATIN CAPITAL LETTER E WITH DIAERESIS]
-"\u00CB" => "E"
-
-# \u0112  [LATIN CAPITAL LETTER E WITH MACRON]
-"\u0112" => "E"
-
-# \u0114  [LATIN CAPITAL LETTER E WITH BREVE]
-"\u0114" => "E"
-
-# \u0116  [LATIN CAPITAL LETTER E WITH DOT ABOVE]
-"\u0116" => "E"
-
-# \u0118  [LATIN CAPITAL LETTER E WITH OGONEK]
-"\u0118" => "E"
-
-# \u011a  [LATIN CAPITAL LETTER E WITH CARON]
-"\u011A" => "E"
-
-# \u018e  [LATIN CAPITAL LETTER REVERSED E]
-"\u018E" => "E"
-
-# \u0190  [LATIN CAPITAL LETTER OPEN E]
-"\u0190" => "E"
-
-# \u0204  [LATIN CAPITAL LETTER E WITH DOUBLE GRAVE]
-"\u0204" => "E"
-
-# \u0206  [LATIN CAPITAL LETTER E WITH INVERTED BREVE]
-"\u0206" => "E"
-
-# \u0228  [LATIN CAPITAL LETTER E WITH CEDILLA]
-"\u0228" => "E"
-
-# \u0246  [LATIN CAPITAL LETTER E WITH STROKE]
-"\u0246" => "E"
-
-# \u1d07  [LATIN LETTER SMALL CAPITAL E]
-"\u1D07" => "E"
-
-# \u1e14  [LATIN CAPITAL LETTER E WITH MACRON AND GRAVE]
-"\u1E14" => "E"
-
-# \u1e16  [LATIN CAPITAL LETTER E WITH MACRON AND ACUTE]
-"\u1E16" => "E"
-
-# \u1e18  [LATIN CAPITAL LETTER E WITH CIRCUMFLEX BELOW]
-"\u1E18" => "E"
-
-# \u1e1a  [LATIN CAPITAL LETTER E WITH TILDE BELOW]
-"\u1E1A" => "E"
-
-# \u1e1c  [LATIN CAPITAL LETTER E WITH CEDILLA AND BREVE]
-"\u1E1C" => "E"
-
-# \u1eb8  [LATIN CAPITAL LETTER E WITH DOT BELOW]
-"\u1EB8" => "E"
-
-# \u1eba  [LATIN CAPITAL LETTER E WITH HOOK ABOVE]
-"\u1EBA" => "E"
-
-# \u1ebc  [LATIN CAPITAL LETTER E WITH TILDE]
-"\u1EBC" => "E"
-
-# \u1ebe  [LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND ACUTE]
-"\u1EBE" => "E"
-
-# \u1ec0  [LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND GRAVE]
-"\u1EC0" => "E"
-
-# \u1ec2  [LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND HOOK ABOVE]
-"\u1EC2" => "E"
-
-# \u1ec4  [LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND TILDE]
-"\u1EC4" => "E"
-
-# \u1ec6  [LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND DOT BELOW]
-"\u1EC6" => "E"
-
-# \u24ba  [CIRCLED LATIN CAPITAL LETTER E]
-"\u24BA" => "E"
-
-# \u2c7b  [LATIN LETTER SMALL CAPITAL TURNED E]
-"\u2C7B" => "E"
-
-# \uff25  [FULLWIDTH LATIN CAPITAL LETTER E]
-"\uFF25" => "E"
-
-# è  [LATIN SMALL LETTER E WITH GRAVE]
-"\u00E8" => "e"
-
-# é  [LATIN SMALL LETTER E WITH ACUTE]
-"\u00E9" => "e"
-
-# ê  [LATIN SMALL LETTER E WITH CIRCUMFLEX]
-"\u00EA" => "e"
-
-# ë  [LATIN SMALL LETTER E WITH DIAERESIS]
-"\u00EB" => "e"
-
-# \u0113  [LATIN SMALL LETTER E WITH MACRON]
-"\u0113" => "e"
-
-# \u0115  [LATIN SMALL LETTER E WITH BREVE]
-"\u0115" => "e"
-
-# \u0117  [LATIN SMALL LETTER E WITH DOT ABOVE]
-"\u0117" => "e"
-
-# \u0119  [LATIN SMALL LETTER E WITH OGONEK]
-"\u0119" => "e"
-
-# \u011b  [LATIN SMALL LETTER E WITH CARON]
-"\u011B" => "e"
-
-# \u01dd  [LATIN SMALL LETTER TURNED E]
-"\u01DD" => "e"
-
-# \u0205  [LATIN SMALL LETTER E WITH DOUBLE GRAVE]
-"\u0205" => "e"
-
-# \u0207  [LATIN SMALL LETTER E WITH INVERTED BREVE]
-"\u0207" => "e"
-
-# \u0229  [LATIN SMALL LETTER E WITH CEDILLA]
-"\u0229" => "e"
-
-# \u0247  [LATIN SMALL LETTER E WITH STROKE]
-"\u0247" => "e"
-
-# \u0258  [LATIN SMALL LETTER REVERSED E]
-"\u0258" => "e"
-
-# \u025b  [LATIN SMALL LETTER OPEN E]
-"\u025B" => "e"
-
-# \u025c  [LATIN SMALL LETTER REVERSED OPEN E]
-"\u025C" => "e"
-
-# \u025d  [LATIN SMALL LETTER REVERSED OPEN E WITH HOOK]
-"\u025D" => "e"
-
-# \u025e  [LATIN SMALL LETTER CLOSED REVERSED OPEN E]
-"\u025E" => "e"
-
-# \u029a  [LATIN SMALL LETTER CLOSED OPEN E]
-"\u029A" => "e"
-
-# \u1d08  [LATIN SMALL LETTER TURNED OPEN E]
-"\u1D08" => "e"
-
-# \u1d92  [LATIN SMALL LETTER E WITH RETROFLEX HOOK]
-"\u1D92" => "e"
-
-# \u1d93  [LATIN SMALL LETTER OPEN E WITH RETROFLEX HOOK]
-"\u1D93" => "e"
-
-# \u1d94  [LATIN SMALL LETTER REVERSED OPEN E WITH RETROFLEX HOOK]
-"\u1D94" => "e"
-
-# \u1e15  [LATIN SMALL LETTER E WITH MACRON AND GRAVE]
-"\u1E15" => "e"
-
-# \u1e17  [LATIN SMALL LETTER E WITH MACRON AND ACUTE]
-"\u1E17" => "e"
-
-# \u1e19  [LATIN SMALL LETTER E WITH CIRCUMFLEX BELOW]
-"\u1E19" => "e"
-
-# \u1e1b  [LATIN SMALL LETTER E WITH TILDE BELOW]
-"\u1E1B" => "e"
-
-# \u1e1d  [LATIN SMALL LETTER E WITH CEDILLA AND BREVE]
-"\u1E1D" => "e"
-
-# \u1eb9  [LATIN SMALL LETTER E WITH DOT BELOW]
-"\u1EB9" => "e"
-
-# \u1ebb  [LATIN SMALL LETTER E WITH HOOK ABOVE]
-"\u1EBB" => "e"
-
-# \u1ebd  [LATIN SMALL LETTER E WITH TILDE]
-"\u1EBD" => "e"
-
-# \u1ebf  [LATIN SMALL LETTER E WITH CIRCUMFLEX AND ACUTE]
-"\u1EBF" => "e"
-
-# \u1ec1  [LATIN SMALL LETTER E WITH CIRCUMFLEX AND GRAVE]
-"\u1EC1" => "e"
-
-# \u1ec3  [LATIN SMALL LETTER E WITH CIRCUMFLEX AND HOOK ABOVE]
-"\u1EC3" => "e"
-
-# \u1ec5  [LATIN SMALL LETTER E WITH CIRCUMFLEX AND TILDE]
-"\u1EC5" => "e"
-
-# \u1ec7  [LATIN SMALL LETTER E WITH CIRCUMFLEX AND DOT BELOW]
-"\u1EC7" => "e"
-
-# \u2091  [LATIN SUBSCRIPT SMALL LETTER E]
-"\u2091" => "e"
-
-# \u24d4  [CIRCLED LATIN SMALL LETTER E]
-"\u24D4" => "e"
-
-# \u2c78  [LATIN SMALL LETTER E WITH NOTCH]
-"\u2C78" => "e"
-
-# \uff45  [FULLWIDTH LATIN SMALL LETTER E]
-"\uFF45" => "e"
-
-# \u24a0  [PARENTHESIZED LATIN SMALL LETTER E]
-"\u24A0" => "(e)"
-
-# \u0191  [LATIN CAPITAL LETTER F WITH HOOK]
-"\u0191" => "F"
-
-# \u1e1e  [LATIN CAPITAL LETTER F WITH DOT ABOVE]
-"\u1E1E" => "F"
-
-# \u24bb  [CIRCLED LATIN CAPITAL LETTER F]
-"\u24BB" => "F"
-
-# \ua730  [LATIN LETTER SMALL CAPITAL F]
-"\uA730" => "F"
-
-# \ua77b  [LATIN CAPITAL LETTER INSULAR F]
-"\uA77B" => "F"
-
-# \ua7fb  [LATIN EPIGRAPHIC LETTER REVERSED F]
-"\uA7FB" => "F"
-
-# \uff26  [FULLWIDTH LATIN CAPITAL LETTER F]
-"\uFF26" => "F"
-
-# \u0192  [LATIN SMALL LETTER F WITH HOOK]
-"\u0192" => "f"
-
-# \u1d6e  [LATIN SMALL LETTER F WITH MIDDLE TILDE]
-"\u1D6E" => "f"
-
-# \u1d82  [LATIN SMALL LETTER F WITH PALATAL HOOK]
-"\u1D82" => "f"
-
-# \u1e1f  [LATIN SMALL LETTER F WITH DOT ABOVE]
-"\u1E1F" => "f"
-
-# \u1e9b  [LATIN SMALL LETTER LONG S WITH DOT ABOVE]
-"\u1E9B" => "f"
-
-# \u24d5  [CIRCLED LATIN SMALL LETTER F]
-"\u24D5" => "f"
-
-# \ua77c  [LATIN SMALL LETTER INSULAR F]
-"\uA77C" => "f"
-
-# \uff46  [FULLWIDTH LATIN SMALL LETTER F]
-"\uFF46" => "f"
-
-# \u24a1  [PARENTHESIZED LATIN SMALL LETTER F]
-"\u24A1" => "(f)"
-
-# \ufb00  [LATIN SMALL LIGATURE FF]
-"\uFB00" => "ff"
-
-# \ufb03  [LATIN SMALL LIGATURE FFI]
-"\uFB03" => "ffi"
-
-# \ufb04  [LATIN SMALL LIGATURE FFL]
-"\uFB04" => "ffl"
-
-# \ufb01  [LATIN SMALL LIGATURE FI]
-"\uFB01" => "fi"
-
-# \ufb02  [LATIN SMALL LIGATURE FL]
-"\uFB02" => "fl"
-
-# \u011c  [LATIN CAPITAL LETTER G WITH CIRCUMFLEX]
-"\u011C" => "G"
-
-# \u011e  [LATIN CAPITAL LETTER G WITH BREVE]
-"\u011E" => "G"
-
-# \u0120  [LATIN CAPITAL LETTER G WITH DOT ABOVE]
-"\u0120" => "G"
-
-# \u0122  [LATIN CAPITAL LETTER G WITH CEDILLA]
-"\u0122" => "G"
-
-# \u0193  [LATIN CAPITAL LETTER G WITH HOOK]
-"\u0193" => "G"
-
-# \u01e4  [LATIN CAPITAL LETTER G WITH STROKE]
-"\u01E4" => "G"
-
-# \u01e5  [LATIN SMALL LETTER G WITH STROKE]
-"\u01E5" => "G"
-
-# \u01e6  [LATIN CAPITAL LETTER G WITH CARON]
-"\u01E6" => "G"
-
-# \u01e7  [LATIN SMALL LETTER G WITH CARON]
-"\u01E7" => "G"
-
-# \u01f4  [LATIN CAPITAL LETTER G WITH ACUTE]
-"\u01F4" => "G"
-
-# \u0262  [LATIN LETTER SMALL CAPITAL G]
-"\u0262" => "G"
-
-# \u029b  [LATIN LETTER SMALL CAPITAL G WITH HOOK]
-"\u029B" => "G"
-
-# \u1e20  [LATIN CAPITAL LETTER G WITH MACRON]
-"\u1E20" => "G"
-
-# \u24bc  [CIRCLED LATIN CAPITAL LETTER G]
-"\u24BC" => "G"
-
-# \ua77d  [LATIN CAPITAL LETTER INSULAR G]
-"\uA77D" => "G"
-
-# \ua77e  [LATIN CAPITAL LETTER TURNED INSULAR G]
-"\uA77E" => "G"
-
-# \uff27  [FULLWIDTH LATIN CAPITAL LETTER G]
-"\uFF27" => "G"
-
-# \u011d  [LATIN SMALL LETTER G WITH CIRCUMFLEX]
-"\u011D" => "g"
-
-# \u011f  [LATIN SMALL LETTER G WITH BREVE]
-"\u011F" => "g"
-
-# \u0121  [LATIN SMALL LETTER G WITH DOT ABOVE]
-"\u0121" => "g"
-
-# \u0123  [LATIN SMALL LETTER G WITH CEDILLA]
-"\u0123" => "g"
-
-# \u01f5  [LATIN SMALL LETTER G WITH ACUTE]
-"\u01F5" => "g"
-
-# \u0260  [LATIN SMALL LETTER G WITH HOOK]
-"\u0260" => "g"
-
-# \u0261  [LATIN SMALL LETTER SCRIPT G]
-"\u0261" => "g"
-
-# \u1d77  [LATIN SMALL LETTER TURNED G]
-"\u1D77" => "g"
-
-# \u1d79  [LATIN SMALL LETTER INSULAR G]
-"\u1D79" => "g"
-
-# \u1d83  [LATIN SMALL LETTER G WITH PALATAL HOOK]
-"\u1D83" => "g"
-
-# \u1e21  [LATIN SMALL LETTER G WITH MACRON]
-"\u1E21" => "g"
-
-# \u24d6  [CIRCLED LATIN SMALL LETTER G]
-"\u24D6" => "g"
-
-# \ua77f  [LATIN SMALL LETTER TURNED INSULAR G]
-"\uA77F" => "g"
-
-# \uff47  [FULLWIDTH LATIN SMALL LETTER G]
-"\uFF47" => "g"
-
-# \u24a2  [PARENTHESIZED LATIN SMALL LETTER G]
-"\u24A2" => "(g)"
-
-# \u0124  [LATIN CAPITAL LETTER H WITH CIRCUMFLEX]
-"\u0124" => "H"
-
-# \u0126  [LATIN CAPITAL LETTER H WITH STROKE]
-"\u0126" => "H"
-
-# \u021e  [LATIN CAPITAL LETTER H WITH CARON]
-"\u021E" => "H"
-
-# \u029c  [LATIN LETTER SMALL CAPITAL H]
-"\u029C" => "H"
-
-# \u1e22  [LATIN CAPITAL LETTER H WITH DOT ABOVE]
-"\u1E22" => "H"
-
-# \u1e24  [LATIN CAPITAL LETTER H WITH DOT BELOW]
-"\u1E24" => "H"
-
-# \u1e26  [LATIN CAPITAL LETTER H WITH DIAERESIS]
-"\u1E26" => "H"
-
-# \u1e28  [LATIN CAPITAL LETTER H WITH CEDILLA]
-"\u1E28" => "H"
-
-# \u1e2a  [LATIN CAPITAL LETTER H WITH BREVE BELOW]
-"\u1E2A" => "H"
-
-# \u24bd  [CIRCLED LATIN CAPITAL LETTER H]
-"\u24BD" => "H"
-
-# \u2c67  [LATIN CAPITAL LETTER H WITH DESCENDER]
-"\u2C67" => "H"
-
-# \u2c75  [LATIN CAPITAL LETTER HALF H]
-"\u2C75" => "H"
-
-# \uff28  [FULLWIDTH LATIN CAPITAL LETTER H]
-"\uFF28" => "H"
-
-# \u0125  [LATIN SMALL LETTER H WITH CIRCUMFLEX]
-"\u0125" => "h"
-
-# \u0127  [LATIN SMALL LETTER H WITH STROKE]
-"\u0127" => "h"
-
-# \u021f  [LATIN SMALL LETTER H WITH CARON]
-"\u021F" => "h"
-
-# \u0265  [LATIN SMALL LETTER TURNED H]
-"\u0265" => "h"
-
-# \u0266  [LATIN SMALL LETTER H WITH HOOK]
-"\u0266" => "h"
-
-# \u02ae  [LATIN SMALL LETTER TURNED H WITH FISHHOOK]
-"\u02AE" => "h"
-
-# \u02af  [LATIN SMALL LETTER TURNED H WITH FISHHOOK AND TAIL]
-"\u02AF" => "h"
-
-# \u1e23  [LATIN SMALL LETTER H WITH DOT ABOVE]
-"\u1E23" => "h"
-
-# \u1e25  [LATIN SMALL LETTER H WITH DOT BELOW]
-"\u1E25" => "h"
-
-# \u1e27  [LATIN SMALL LETTER H WITH DIAERESIS]
-"\u1E27" => "h"
-
-# \u1e29  [LATIN SMALL LETTER H WITH CEDILLA]
-"\u1E29" => "h"
-
-# \u1e2b  [LATIN SMALL LETTER H WITH BREVE BELOW]
-"\u1E2B" => "h"
-
-# \u1e96  [LATIN SMALL LETTER H WITH LINE BELOW]
-"\u1E96" => "h"
-
-# \u24d7  [CIRCLED LATIN SMALL LETTER H]
-"\u24D7" => "h"
-
-# \u2c68  [LATIN SMALL LETTER H WITH DESCENDER]
-"\u2C68" => "h"
-
-# \u2c76  [LATIN SMALL LETTER HALF H]
-"\u2C76" => "h"
-
-# \uff48  [FULLWIDTH LATIN SMALL LETTER H]
-"\uFF48" => "h"
-
-# \u01f6  http://en.wikipedia.org/wiki/Hwair  [LATIN CAPITAL LETTER HWAIR]
-"\u01F6" => "HV"
-
-# \u24a3  [PARENTHESIZED LATIN SMALL LETTER H]
-"\u24A3" => "(h)"
-
-# \u0195  [LATIN SMALL LETTER HV]
-"\u0195" => "hv"
-
-# Ì  [LATIN CAPITAL LETTER I WITH GRAVE]
-"\u00CC" => "I"
-
-# Í  [LATIN CAPITAL LETTER I WITH ACUTE]
-"\u00CD" => "I"
-
-# Î  [LATIN CAPITAL LETTER I WITH CIRCUMFLEX]
-"\u00CE" => "I"
-
-# Ï  [LATIN CAPITAL LETTER I WITH DIAERESIS]
-"\u00CF" => "I"
-
-# \u0128  [LATIN CAPITAL LETTER I WITH TILDE]
-"\u0128" => "I"
-
-# \u012a  [LATIN CAPITAL LETTER I WITH MACRON]
-"\u012A" => "I"
-
-# \u012c  [LATIN CAPITAL LETTER I WITH BREVE]
-"\u012C" => "I"
-
-# \u012e  [LATIN CAPITAL LETTER I WITH OGONEK]
-"\u012E" => "I"
-
-# \u0130  [LATIN CAPITAL LETTER I WITH DOT ABOVE]
-"\u0130" => "I"
-
-# \u0196  [LATIN CAPITAL LETTER IOTA]
-"\u0196" => "I"
-
-# \u0197  [LATIN CAPITAL LETTER I WITH STROKE]
-"\u0197" => "I"
-
-# \u01cf  [LATIN CAPITAL LETTER I WITH CARON]
-"\u01CF" => "I"
-
-# \u0208  [LATIN CAPITAL LETTER I WITH DOUBLE GRAVE]
-"\u0208" => "I"
-
-# \u020a  [LATIN CAPITAL LETTER I WITH INVERTED BREVE]
-"\u020A" => "I"
-
-# \u026a  [LATIN LETTER SMALL CAPITAL I]
-"\u026A" => "I"
-
-# \u1d7b  [LATIN SMALL CAPITAL LETTER I WITH STROKE]
-"\u1D7B" => "I"
-
-# \u1e2c  [LATIN CAPITAL LETTER I WITH TILDE BELOW]
-"\u1E2C" => "I"
-
-# \u1e2e  [LATIN CAPITAL LETTER I WITH DIAERESIS AND ACUTE]
-"\u1E2E" => "I"
-
-# \u1ec8  [LATIN CAPITAL LETTER I WITH HOOK ABOVE]
-"\u1EC8" => "I"
-
-# \u1eca  [LATIN CAPITAL LETTER I WITH DOT BELOW]
-"\u1ECA" => "I"
-
-# \u24be  [CIRCLED LATIN CAPITAL LETTER I]
-"\u24BE" => "I"
-
-# \ua7fe  [LATIN EPIGRAPHIC LETTER I LONGA]
-"\uA7FE" => "I"
-
-# \uff29  [FULLWIDTH LATIN CAPITAL LETTER I]
-"\uFF29" => "I"
-
-# ì  [LATIN SMALL LETTER I WITH GRAVE]
-"\u00EC" => "i"
-
-# í  [LATIN SMALL LETTER I WITH ACUTE]
-"\u00ED" => "i"
-
-# î  [LATIN SMALL LETTER I WITH CIRCUMFLEX]
-"\u00EE" => "i"
-
-# ï  [LATIN SMALL LETTER I WITH DIAERESIS]
-"\u00EF" => "i"
-
-# \u0129  [LATIN SMALL LETTER I WITH TILDE]
-"\u0129" => "i"
-
-# \u012b  [LATIN SMALL LETTER I WITH MACRON]
-"\u012B" => "i"
-
-# \u012d  [LATIN SMALL LETTER I WITH BREVE]
-"\u012D" => "i"
-
-# \u012f  [LATIN SMALL LETTER I WITH OGONEK]
-"\u012F" => "i"
-
-# \u0131  [LATIN SMALL LETTER DOTLESS I]
-"\u0131" => "i"
-
-# \u01d0  [LATIN SMALL LETTER I WITH CARON]
-"\u01D0" => "i"
-
-# \u0209  [LATIN SMALL LETTER I WITH DOUBLE GRAVE]
-"\u0209" => "i"
-
-# \u020b  [LATIN SMALL LETTER I WITH INVERTED BREVE]
-"\u020B" => "i"
-
-# \u0268  [LATIN SMALL LETTER I WITH STROKE]
-"\u0268" => "i"
-
-# \u1d09  [LATIN SMALL LETTER TURNED I]
-"\u1D09" => "i"
-
-# \u1d62  [LATIN SUBSCRIPT SMALL LETTER I]
-"\u1D62" => "i"
-
-# \u1d7c  [LATIN SMALL LETTER IOTA WITH STROKE]
-"\u1D7C" => "i"
-
-# \u1d96  [LATIN SMALL LETTER I WITH RETROFLEX HOOK]
-"\u1D96" => "i"
-
-# \u1e2d  [LATIN SMALL LETTER I WITH TILDE BELOW]
-"\u1E2D" => "i"
-
-# \u1e2f  [LATIN SMALL LETTER I WITH DIAERESIS AND ACUTE]
-"\u1E2F" => "i"
-
-# \u1ec9  [LATIN SMALL LETTER I WITH HOOK ABOVE]
-"\u1EC9" => "i"
-
-# \u1ecb  [LATIN SMALL LETTER I WITH DOT BELOW]
-"\u1ECB" => "i"
-
-# \u2071  [SUPERSCRIPT LATIN SMALL LETTER I]
-"\u2071" => "i"
-
-# \u24d8  [CIRCLED LATIN SMALL LETTER I]
-"\u24D8" => "i"
-
-# \uff49  [FULLWIDTH LATIN SMALL LETTER I]
-"\uFF49" => "i"
-
-# \u0132  [LATIN CAPITAL LIGATURE IJ]
-"\u0132" => "IJ"
-
-# \u24a4  [PARENTHESIZED LATIN SMALL LETTER I]
-"\u24A4" => "(i)"
-
-# \u0133  [LATIN SMALL LIGATURE IJ]
-"\u0133" => "ij"
-
-# \u0134  [LATIN CAPITAL LETTER J WITH CIRCUMFLEX]
-"\u0134" => "J"
-
-# \u0248  [LATIN CAPITAL LETTER J WITH STROKE]
-"\u0248" => "J"
-
-# \u1d0a  [LATIN LETTER SMALL CAPITAL J]
-"\u1D0A" => "J"
-
-# \u24bf  [CIRCLED LATIN CAPITAL LETTER J]
-"\u24BF" => "J"
-
-# \uff2a  [FULLWIDTH LATIN CAPITAL LETTER J]
-"\uFF2A" => "J"
-
-# \u0135  [LATIN SMALL LETTER J WITH CIRCUMFLEX]
-"\u0135" => "j"
-
-# \u01f0  [LATIN SMALL LETTER J WITH CARON]
-"\u01F0" => "j"
-
-# \u0237  [LATIN SMALL LETTER DOTLESS J]
-"\u0237" => "j"
-
-# \u0249  [LATIN SMALL LETTER J WITH STROKE]
-"\u0249" => "j"
-
-# \u025f  [LATIN SMALL LETTER DOTLESS J WITH STROKE]
-"\u025F" => "j"
-
-# \u0284  [LATIN SMALL LETTER DOTLESS J WITH STROKE AND HOOK]
-"\u0284" => "j"
-
-# \u029d  [LATIN SMALL LETTER J WITH CROSSED-TAIL]
-"\u029D" => "j"
-
-# \u24d9  [CIRCLED LATIN SMALL LETTER J]
-"\u24D9" => "j"
-
-# \u2c7c  [LATIN SUBSCRIPT SMALL LETTER J]
-"\u2C7C" => "j"
-
-# \uff4a  [FULLWIDTH LATIN SMALL LETTER J]
-"\uFF4A" => "j"
-
-# \u24a5  [PARENTHESIZED LATIN SMALL LETTER J]
-"\u24A5" => "(j)"
-
-# \u0136  [LATIN CAPITAL LETTER K WITH CEDILLA]
-"\u0136" => "K"
-
-# \u0198  [LATIN CAPITAL LETTER K WITH HOOK]
-"\u0198" => "K"
-
-# \u01e8  [LATIN CAPITAL LETTER K WITH CARON]
-"\u01E8" => "K"
-
-# \u1d0b  [LATIN LETTER SMALL CAPITAL K]
-"\u1D0B" => "K"
-
-# \u1e30  [LATIN CAPITAL LETTER K WITH ACUTE]
-"\u1E30" => "K"
-
-# \u1e32  [LATIN CAPITAL LETTER K WITH DOT BELOW]
-"\u1E32" => "K"
-
-# \u1e34  [LATIN CAPITAL LETTER K WITH LINE BELOW]
-"\u1E34" => "K"
-
-# \u24c0  [CIRCLED LATIN CAPITAL LETTER K]
-"\u24C0" => "K"
-
-# \u2c69  [LATIN CAPITAL LETTER K WITH DESCENDER]
-"\u2C69" => "K"
-
-# \ua740  [LATIN CAPITAL LETTER K WITH STROKE]
-"\uA740" => "K"
-
-# \ua742  [LATIN CAPITAL LETTER K WITH DIAGONAL STROKE]
-"\uA742" => "K"
-
-# \ua744  [LATIN CAPITAL LETTER K WITH STROKE AND DIAGONAL STROKE]
-"\uA744" => "K"
-
-# \uff2b  [FULLWIDTH LATIN CAPITAL LETTER K]
-"\uFF2B" => "K"
-
-# \u0137  [LATIN SMALL LETTER K WITH CEDILLA]
-"\u0137" => "k"
-
-# \u0199  [LATIN SMALL LETTER K WITH HOOK]
-"\u0199" => "k"
-
-# \u01e9  [LATIN SMALL LETTER K WITH CARON]
-"\u01E9" => "k"
-
-# \u029e  [LATIN SMALL LETTER TURNED K]
-"\u029E" => "k"
-
-# \u1d84  [LATIN SMALL LETTER K WITH PALATAL HOOK]
-"\u1D84" => "k"
-
-# \u1e31  [LATIN SMALL LETTER K WITH ACUTE]
-"\u1E31" => "k"
-
-# \u1e33  [LATIN SMALL LETTER K WITH DOT BELOW]
-"\u1E33" => "k"
-
-# \u1e35  [LATIN SMALL LETTER K WITH LINE BELOW]
-"\u1E35" => "k"
-
-# \u24da  [CIRCLED LATIN SMALL LETTER K]
-"\u24DA" => "k"
-
-# \u2c6a  [LATIN SMALL LETTER K WITH DESCENDER]
-"\u2C6A" => "k"
-
-# \ua741  [LATIN SMALL LETTER K WITH STROKE]
-"\uA741" => "k"
-
-# \ua743  [LATIN SMALL LETTER K WITH DIAGONAL STROKE]
-"\uA743" => "k"
-
-# \ua745  [LATIN SMALL LETTER K WITH STROKE AND DIAGONAL STROKE]
-"\uA745" => "k"
-
-# \uff4b  [FULLWIDTH LATIN SMALL LETTER K]
-"\uFF4B" => "k"
-
-# \u24a6  [PARENTHESIZED LATIN SMALL LETTER K]
-"\u24A6" => "(k)"
-
-# \u0139  [LATIN CAPITAL LETTER L WITH ACUTE]
-"\u0139" => "L"
-
-# \u013b  [LATIN CAPITAL LETTER L WITH CEDILLA]
-"\u013B" => "L"
-
-# \u013d  [LATIN CAPITAL LETTER L WITH CARON]
-"\u013D" => "L"
-
-# \u013f  [LATIN CAPITAL LETTER L WITH MIDDLE DOT]
-"\u013F" => "L"
-
-# \u0141  [LATIN CAPITAL LETTER L WITH STROKE]
-"\u0141" => "L"
-
-# \u023d  [LATIN CAPITAL LETTER L WITH BAR]
-"\u023D" => "L"
-
-# \u029f  [LATIN LETTER SMALL CAPITAL L]
-"\u029F" => "L"
-
-# \u1d0c  [LATIN LETTER SMALL CAPITAL L WITH STROKE]
-"\u1D0C" => "L"
-
-# \u1e36  [LATIN CAPITAL LETTER L WITH DOT BELOW]
-"\u1E36" => "L"
-
-# \u1e38  [LATIN CAPITAL LETTER L WITH DOT BELOW AND MACRON]
-"\u1E38" => "L"
-
-# \u1e3a  [LATIN CAPITAL LETTER L WITH LINE BELOW]
-"\u1E3A" => "L"
-
-# \u1e3c  [LATIN CAPITAL LETTER L WITH CIRCUMFLEX BELOW]
-"\u1E3C" => "L"
-
-# \u24c1  [CIRCLED LATIN CAPITAL LETTER L]
-"\u24C1" => "L"
-
-# \u2c60  [LATIN CAPITAL LETTER L WITH DOUBLE BAR]
-"\u2C60" => "L"
-
-# \u2c62  [LATIN CAPITAL LETTER L WITH MIDDLE TILDE]
-"\u2C62" => "L"
-
-# \ua746  [LATIN CAPITAL LETTER BROKEN L]
-"\uA746" => "L"
-
-# \ua748  [LATIN CAPITAL LETTER L WITH HIGH STROKE]
-"\uA748" => "L"
-
-# \ua780  [LATIN CAPITAL LETTER TURNED L]
-"\uA780" => "L"
-
-# \uff2c  [FULLWIDTH LATIN CAPITAL LETTER L]
-"\uFF2C" => "L"
-
-# \u013a  [LATIN SMALL LETTER L WITH ACUTE]
-"\u013A" => "l"
-
-# \u013c  [LATIN SMALL LETTER L WITH CEDILLA]
-"\u013C" => "l"
-
-# \u013e  [LATIN SMALL LETTER L WITH CARON]
-"\u013E" => "l"
-
-# \u0140  [LATIN SMALL LETTER L WITH MIDDLE DOT]
-"\u0140" => "l"
-
-# \u0142  [LATIN SMALL LETTER L WITH STROKE]
-"\u0142" => "l"
-
-# \u019a  [LATIN SMALL LETTER L WITH BAR]
-"\u019A" => "l"
-
-# \u0234  [LATIN SMALL LETTER L WITH CURL]
-"\u0234" => "l"
-
-# \u026b  [LATIN SMALL LETTER L WITH MIDDLE TILDE]
-"\u026B" => "l"
-
-# \u026c  [LATIN SMALL LETTER L WITH BELT]
-"\u026C" => "l"
-
-# \u026d  [LATIN SMALL LETTER L WITH RETROFLEX HOOK]
-"\u026D" => "l"
-
-# \u1d85  [LATIN SMALL LETTER L WITH PALATAL HOOK]
-"\u1D85" => "l"
-
-# \u1e37  [LATIN SMALL LETTER L WITH DOT BELOW]
-"\u1E37" => "l"
-
-# \u1e39  [LATIN SMALL LETTER L WITH DOT BELOW AND MACRON]
-"\u1E39" => "l"
-
-# \u1e3b  [LATIN SMALL LETTER L WITH LINE BELOW]
-"\u1E3B" => "l"
-
-# \u1e3d  [LATIN SMALL LETTER L WITH CIRCUMFLEX BELOW]
-"\u1E3D" => "l"
-
-# \u24db  [CIRCLED LATIN SMALL LETTER L]
-"\u24DB" => "l"
-
-# \u2c61  [LATIN SMALL LETTER L WITH DOUBLE BAR]
-"\u2C61" => "l"
-
-# \ua747  [LATIN SMALL LETTER BROKEN L]
-"\uA747" => "l"
-
-# \ua749  [LATIN SMALL LETTER L WITH HIGH STROKE]
-"\uA749" => "l"
-
-# \ua781  [LATIN SMALL LETTER TURNED L]
-"\uA781" => "l"
-
-# \uff4c  [FULLWIDTH LATIN SMALL LETTER L]
-"\uFF4C" => "l"
-
-# \u01c7  [LATIN CAPITAL LETTER LJ]
-"\u01C7" => "LJ"
-
-# \u1efa  [LATIN CAPITAL LETTER MIDDLE-WELSH LL]
-"\u1EFA" => "LL"
-
-# \u01c8  [LATIN CAPITAL LETTER L WITH SMALL LETTER J]
-"\u01C8" => "Lj"
-
-# \u24a7  [PARENTHESIZED LATIN SMALL LETTER L]
-"\u24A7" => "(l)"
-
-# \u01c9  [LATIN SMALL LETTER LJ]
-"\u01C9" => "lj"
-
-# \u1efb  [LATIN SMALL LETTER MIDDLE-WELSH LL]
-"\u1EFB" => "ll"
-
-# \u02aa  [LATIN SMALL LETTER LS DIGRAPH]
-"\u02AA" => "ls"
-
-# \u02ab  [LATIN SMALL LETTER LZ DIGRAPH]
-"\u02AB" => "lz"
-
-# \u019c  [LATIN CAPITAL LETTER TURNED M]
-"\u019C" => "M"
-
-# \u1d0d  [LATIN LETTER SMALL CAPITAL M]
-"\u1D0D" => "M"
-
-# \u1e3e  [LATIN CAPITAL LETTER M WITH ACUTE]
-"\u1E3E" => "M"
-
-# \u1e40  [LATIN CAPITAL LETTER M WITH DOT ABOVE]
-"\u1E40" => "M"
-
-# \u1e42  [LATIN CAPITAL LETTER M WITH DOT BELOW]
-"\u1E42" => "M"
-
-# \u24c2  [CIRCLED LATIN CAPITAL LETTER M]
-"\u24C2" => "M"
-
-# \u2c6e  [LATIN CAPITAL LETTER M WITH HOOK]
-"\u2C6E" => "M"
-
-# \ua7fd  [LATIN EPIGRAPHIC LETTER INVERTED M]
-"\uA7FD" => "M"
-
-# \ua7ff  [LATIN EPIGRAPHIC LETTER ARCHAIC M]
-"\uA7FF" => "M"
-
-# \uff2d  [FULLWIDTH LATIN CAPITAL LETTER M]
-"\uFF2D" => "M"
-
-# \u026f  [LATIN SMALL LETTER TURNED M]
-"\u026F" => "m"
-
-# \u0270  [LATIN SMALL LETTER TURNED M WITH LONG LEG]
-"\u0270" => "m"
-
-# \u0271  [LATIN SMALL LETTER M WITH HOOK]
-"\u0271" => "m"
-
-# \u1d6f  [LATIN SMALL LETTER M WITH MIDDLE TILDE]
-"\u1D6F" => "m"
-
-# \u1d86  [LATIN SMALL LETTER M WITH PALATAL HOOK]
-"\u1D86" => "m"
-
-# \u1e3f  [LATIN SMALL LETTER M WITH ACUTE]
-"\u1E3F" => "m"
-
-# \u1e41  [LATIN SMALL LETTER M WITH DOT ABOVE]
-"\u1E41" => "m"
-
-# \u1e43  [LATIN SMALL LETTER M WITH DOT BELOW]
-"\u1E43" => "m"
-
-# \u24dc  [CIRCLED LATIN SMALL LETTER M]
-"\u24DC" => "m"
-
-# \uff4d  [FULLWIDTH LATIN SMALL LETTER M]
-"\uFF4D" => "m"
-
-# \u24a8  [PARENTHESIZED LATIN SMALL LETTER M]
-"\u24A8" => "(m)"
-
-# Ñ  [LATIN CAPITAL LETTER N WITH TILDE]
-"\u00D1" => "N"
-
-# \u0143  [LATIN CAPITAL LETTER N WITH ACUTE]
-"\u0143" => "N"
-
-# \u0145  [LATIN CAPITAL LETTER N WITH CEDILLA]
-"\u0145" => "N"
-
-# \u0147  [LATIN CAPITAL LETTER N WITH CARON]
-"\u0147" => "N"
-
-# \u014a  http://en.wikipedia.org/wiki/Eng_(letter)  [LATIN CAPITAL LETTER ENG]
-"\u014A" => "N"
-
-# \u019d  [LATIN CAPITAL LETTER N WITH LEFT HOOK]
-"\u019D" => "N"
-
-# \u01f8  [LATIN CAPITAL LETTER N WITH GRAVE]
-"\u01F8" => "N"
-
-# \u0220  [LATIN CAPITAL LETTER N WITH LONG RIGHT LEG]
-"\u0220" => "N"
-
-# \u0274  [LATIN LETTER SMALL CAPITAL N]
-"\u0274" => "N"
-
-# \u1d0e  [LATIN LETTER SMALL CAPITAL REVERSED N]
-"\u1D0E" => "N"
-
-# \u1e44  [LATIN CAPITAL LETTER N WITH DOT ABOVE]
-"\u1E44" => "N"
-
-# \u1e46  [LATIN CAPITAL LETTER N WITH DOT BELOW]
-"\u1E46" => "N"
-
-# \u1e48  [LATIN CAPITAL LETTER N WITH LINE BELOW]
-"\u1E48" => "N"
-
-# \u1e4a  [LATIN CAPITAL LETTER N WITH CIRCUMFLEX BELOW]
-"\u1E4A" => "N"
-
-# \u24c3  [CIRCLED LATIN CAPITAL LETTER N]
-"\u24C3" => "N"
-
-# \uff2e  [FULLWIDTH LATIN CAPITAL LETTER N]
-"\uFF2E" => "N"
-
-# ñ  [LATIN SMALL LETTER N WITH TILDE]
-"\u00F1" => "n"
-
-# \u0144  [LATIN SMALL LETTER N WITH ACUTE]
-"\u0144" => "n"
-
-# \u0146  [LATIN SMALL LETTER N WITH CEDILLA]
-"\u0146" => "n"
-
-# \u0148  [LATIN SMALL LETTER N WITH CARON]
-"\u0148" => "n"
-
-# \u0149  [LATIN SMALL LETTER N PRECEDED BY APOSTROPHE]
-"\u0149" => "n"
-
-# \u014b  http://en.wikipedia.org/wiki/Eng_(letter)  [LATIN SMALL LETTER ENG]
-"\u014B" => "n"
-
-# \u019e  [LATIN SMALL LETTER N WITH LONG RIGHT LEG]
-"\u019E" => "n"
-
-# \u01f9  [LATIN SMALL LETTER N WITH GRAVE]
-"\u01F9" => "n"
-
-# \u0235  [LATIN SMALL LETTER N WITH CURL]
-"\u0235" => "n"
-
-# \u0272  [LATIN SMALL LETTER N WITH LEFT HOOK]
-"\u0272" => "n"
-
-# \u0273  [LATIN SMALL LETTER N WITH RETROFLEX HOOK]
-"\u0273" => "n"
-
-# \u1d70  [LATIN SMALL LETTER N WITH MIDDLE TILDE]
-"\u1D70" => "n"
-
-# \u1d87  [LATIN SMALL LETTER N WITH PALATAL HOOK]
-"\u1D87" => "n"
-
-# \u1e45  [LATIN SMALL LETTER N WITH DOT ABOVE]
-"\u1E45" => "n"
-
-# \u1e47  [LATIN SMALL LETTER N WITH DOT BELOW]
-"\u1E47" => "n"
-
-# \u1e49  [LATIN SMALL LETTER N WITH LINE BELOW]
-"\u1E49" => "n"
-
-# \u1e4b  [LATIN SMALL LETTER N WITH CIRCUMFLEX BELOW]
-"\u1E4B" => "n"
-
-# \u207f  [SUPERSCRIPT LATIN SMALL LETTER N]
-"\u207F" => "n"
-
-# \u24dd  [CIRCLED LATIN SMALL LETTER N]
-"\u24DD" => "n"
-
-# \uff4e  [FULLWIDTH LATIN SMALL LETTER N]
-"\uFF4E" => "n"
-
-# \u01ca  [LATIN CAPITAL LETTER NJ]
-"\u01CA" => "NJ"
-
-# \u01cb  [LATIN CAPITAL LETTER N WITH SMALL LETTER J]
-"\u01CB" => "Nj"
-
-# \u24a9  [PARENTHESIZED LATIN SMALL LETTER N]
-"\u24A9" => "(n)"
-
-# \u01cc  [LATIN SMALL LETTER NJ]
-"\u01CC" => "nj"
-
-# Ò  [LATIN CAPITAL LETTER O WITH GRAVE]
-"\u00D2" => "O"
-
-# Ó  [LATIN CAPITAL LETTER O WITH ACUTE]
-"\u00D3" => "O"
-
-# Ô  [LATIN CAPITAL LETTER O WITH CIRCUMFLEX]
-"\u00D4" => "O"
-
-# Õ  [LATIN CAPITAL LETTER O WITH TILDE]
-"\u00D5" => "O"
-
-# Ö  [LATIN CAPITAL LETTER O WITH DIAERESIS]
-"\u00D6" => "O"
-
-# Ø  [LATIN CAPITAL LETTER O WITH STROKE]
-"\u00D8" => "O"
-
-# \u014c  [LATIN CAPITAL LETTER O WITH MACRON]
-"\u014C" => "O"
-
-# \u014e  [LATIN CAPITAL LETTER O WITH BREVE]
-"\u014E" => "O"
-
-# \u0150  [LATIN CAPITAL LETTER O WITH DOUBLE ACUTE]
-"\u0150" => "O"
-
-# \u0186  [LATIN CAPITAL LETTER OPEN O]
-"\u0186" => "O"
-
-# \u019f  [LATIN CAPITAL LETTER O WITH MIDDLE TILDE]
-"\u019F" => "O"
-
-# \u01a0  [LATIN CAPITAL LETTER O WITH HORN]
-"\u01A0" => "O"
-
-# \u01d1  [LATIN CAPITAL LETTER O WITH CARON]
-"\u01D1" => "O"
-
-# \u01ea  [LATIN CAPITAL LETTER O WITH OGONEK]
-"\u01EA" => "O"
-
-# \u01ec  [LATIN CAPITAL LETTER O WITH OGONEK AND MACRON]
-"\u01EC" => "O"
-
-# \u01fe  [LATIN CAPITAL LETTER O WITH STROKE AND ACUTE]
-"\u01FE" => "O"
-
-# \u020c  [LATIN CAPITAL LETTER O WITH DOUBLE GRAVE]
-"\u020C" => "O"
-
-# \u020e  [LATIN CAPITAL LETTER O WITH INVERTED BREVE]
-"\u020E" => "O"
-
-# \u022a  [LATIN CAPITAL LETTER O WITH DIAERESIS AND MACRON]
-"\u022A" => "O"
-
-# \u022c  [LATIN CAPITAL LETTER O WITH TILDE AND MACRON]
-"\u022C" => "O"
-
-# \u022e  [LATIN CAPITAL LETTER O WITH DOT ABOVE]
-"\u022E" => "O"
-
-# \u0230  [LATIN CAPITAL LETTER O WITH DOT ABOVE AND MACRON]
-"\u0230" => "O"
-
-# \u1d0f  [LATIN LETTER SMALL CAPITAL O]
-"\u1D0F" => "O"
-
-# \u1d10  [LATIN LETTER SMALL CAPITAL OPEN O]
-"\u1D10" => "O"
-
-# \u1e4c  [LATIN CAPITAL LETTER O WITH TILDE AND ACUTE]
-"\u1E4C" => "O"
-
-# \u1e4e  [LATIN CAPITAL LETTER O WITH TILDE AND DIAERESIS]
-"\u1E4E" => "O"
-
-# \u1e50  [LATIN CAPITAL LETTER O WITH MACRON AND GRAVE]
-"\u1E50" => "O"
-
-# \u1e52  [LATIN CAPITAL LETTER O WITH MACRON AND ACUTE]
-"\u1E52" => "O"
-
-# \u1ecc  [LATIN CAPITAL LETTER O WITH DOT BELOW]
-"\u1ECC" => "O"
-
-# \u1ece  [LATIN CAPITAL LETTER O WITH HOOK ABOVE]
-"\u1ECE" => "O"
-
-# \u1ed0  [LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND ACUTE]
-"\u1ED0" => "O"
-
-# \u1ed2  [LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND GRAVE]
-"\u1ED2" => "O"
-
-# \u1ed4  [LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND HOOK ABOVE]
-"\u1ED4" => "O"
-
-# \u1ed6  [LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND TILDE]
-"\u1ED6" => "O"
-
-# \u1ed8  [LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND DOT BELOW]
-"\u1ED8" => "O"
-
-# \u1eda  [LATIN CAPITAL LETTER O WITH HORN AND ACUTE]
-"\u1EDA" => "O"
-
-# \u1edc  [LATIN CAPITAL LETTER O WITH HORN AND GRAVE]
-"\u1EDC" => "O"
-
-# \u1ede  [LATIN CAPITAL LETTER O WITH HORN AND HOOK ABOVE]
-"\u1EDE" => "O"
-
-# \u1ee0  [LATIN CAPITAL LETTER O WITH HORN AND TILDE]
-"\u1EE0" => "O"
-
-# \u1ee2  [LATIN CAPITAL LETTER O WITH HORN AND DOT BELOW]
-"\u1EE2" => "O"
-
-# \u24c4  [CIRCLED LATIN CAPITAL LETTER O]
-"\u24C4" => "O"
-
-# \ua74a  [LATIN CAPITAL LETTER O WITH LONG STROKE OVERLAY]
-"\uA74A" => "O"
-
-# \ua74c  [LATIN CAPITAL LETTER O WITH LOOP]
-"\uA74C" => "O"
-
-# \uff2f  [FULLWIDTH LATIN CAPITAL LETTER O]
-"\uFF2F" => "O"
-
-# ò  [LATIN SMALL LETTER O WITH GRAVE]
-"\u00F2" => "o"
-
-# ó  [LATIN SMALL LETTER O WITH ACUTE]
-"\u00F3" => "o"
-
-# ô  [LATIN SMALL LETTER O WITH CIRCUMFLEX]
-"\u00F4" => "o"
-
-# õ  [LATIN SMALL LETTER O WITH TILDE]
-"\u00F5" => "o"
-
-# ö  [LATIN SMALL LETTER O WITH DIAERESIS]
-"\u00F6" => "o"
-
-# ø  [LATIN SMALL LETTER O WITH STROKE]
-"\u00F8" => "o"
-
-# \u014d  [LATIN SMALL LETTER O WITH MACRON]
-"\u014D" => "o"
-
-# \u014f  [LATIN SMALL LETTER O WITH BREVE]
-"\u014F" => "o"
-
-# \u0151  [LATIN SMALL LETTER O WITH DOUBLE ACUTE]
-"\u0151" => "o"
-
-# \u01a1  [LATIN SMALL LETTER O WITH HORN]
-"\u01A1" => "o"
-
-# \u01d2  [LATIN SMALL LETTER O WITH CARON]
-"\u01D2" => "o"
-
-# \u01eb  [LATIN SMALL LETTER O WITH OGONEK]
-"\u01EB" => "o"
-
-# \u01ed  [LATIN SMALL LETTER O WITH OGONEK AND MACRON]
-"\u01ED" => "o"
-
-# \u01ff  [LATIN SMALL LETTER O WITH STROKE AND ACUTE]
-"\u01FF" => "o"
-
-# \u020d  [LATIN SMALL LETTER O WITH DOUBLE GRAVE]
-"\u020D" => "o"
-
-# \u020f  [LATIN SMALL LETTER O WITH INVERTED BREVE]
-"\u020F" => "o"
-
-# \u022b  [LATIN SMALL LETTER O WITH DIAERESIS AND MACRON]
-"\u022B" => "o"
-
-# \u022d  [LATIN SMALL LETTER O WITH TILDE AND MACRON]
-"\u022D" => "o"
-
-# \u022f  [LATIN SMALL LETTER O WITH DOT ABOVE]
-"\u022F" => "o"
-
-# \u0231  [LATIN SMALL LETTER O WITH DOT ABOVE AND MACRON]
-"\u0231" => "o"
-
-# \u0254  [LATIN SMALL LETTER OPEN O]
-"\u0254" => "o"
-
-# \u0275  [LATIN SMALL LETTER BARRED O]
-"\u0275" => "o"
-
-# \u1d16  [LATIN SMALL LETTER TOP HALF O]
-"\u1D16" => "o"
-
-# \u1d17  [LATIN SMALL LETTER BOTTOM HALF O]
-"\u1D17" => "o"
-
-# \u1d97  [LATIN SMALL LETTER OPEN O WITH RETROFLEX HOOK]
-"\u1D97" => "o"
-
-# \u1e4d  [LATIN SMALL LETTER O WITH TILDE AND ACUTE]
-"\u1E4D" => "o"
-
-# \u1e4f  [LATIN SMALL LETTER O WITH TILDE AND DIAERESIS]
-"\u1E4F" => "o"
-
-# \u1e51  [LATIN SMALL LETTER O WITH MACRON AND GRAVE]
-"\u1E51" => "o"
-
-# \u1e53  [LATIN SMALL LETTER O WITH MACRON AND ACUTE]
-"\u1E53" => "o"
-
-# \u1ecd  [LATIN SMALL LETTER O WITH DOT BELOW]
-"\u1ECD" => "o"
-
-# \u1ecf  [LATIN SMALL LETTER O WITH HOOK ABOVE]
-"\u1ECF" => "o"
-
-# \u1ed1  [LATIN SMALL LETTER O WITH CIRCUMFLEX AND ACUTE]
-"\u1ED1" => "o"
-
-# \u1ed3  [LATIN SMALL LETTER O WITH CIRCUMFLEX AND GRAVE]
-"\u1ED3" => "o"
-
-# \u1ed5  [LATIN SMALL LETTER O WITH CIRCUMFLEX AND HOOK ABOVE]
-"\u1ED5" => "o"
-
-# \u1ed7  [LATIN SMALL LETTER O WITH CIRCUMFLEX AND TILDE]
-"\u1ED7" => "o"
-
-# \u1ed9  [LATIN SMALL LETTER O WITH CIRCUMFLEX AND DOT BELOW]
-"\u1ED9" => "o"
-
-# \u1edb  [LATIN SMALL LETTER O WITH HORN AND ACUTE]
-"\u1EDB" => "o"
-
-# \u1edd  [LATIN SMALL LETTER O WITH HORN AND GRAVE]
-"\u1EDD" => "o"
-
-# \u1edf  [LATIN SMALL LETTER O WITH HORN AND HOOK ABOVE]
-"\u1EDF" => "o"
-
-# \u1ee1  [LATIN SMALL LETTER O WITH HORN AND TILDE]
-"\u1EE1" => "o"
-
-# \u1ee3  [LATIN SMALL LETTER O WITH HORN AND DOT BELOW]
-"\u1EE3" => "o"
-
-# \u2092  [LATIN SUBSCRIPT SMALL LETTER O]
-"\u2092" => "o"
-
-# \u24de  [CIRCLED LATIN SMALL LETTER O]
-"\u24DE" => "o"
-
-# \u2c7a  [LATIN SMALL LETTER O WITH LOW RING INSIDE]
-"\u2C7A" => "o"
-
-# \ua74b  [LATIN SMALL LETTER O WITH LONG STROKE OVERLAY]
-"\uA74B" => "o"
-
-# \ua74d  [LATIN SMALL LETTER O WITH LOOP]
-"\uA74D" => "o"
-
-# \uff4f  [FULLWIDTH LATIN SMALL LETTER O]
-"\uFF4F" => "o"
-
-# \u0152  [LATIN CAPITAL LIGATURE OE]
-"\u0152" => "OE"
-
-# \u0276  [LATIN LETTER SMALL CAPITAL OE]
-"\u0276" => "OE"
-
-# \ua74e  [LATIN CAPITAL LETTER OO]
-"\uA74E" => "OO"
-
-# \u0222  http://en.wikipedia.org/wiki/OU  [LATIN CAPITAL LETTER OU]
-"\u0222" => "OU"
-
-# \u1d15  [LATIN LETTER SMALL CAPITAL OU]
-"\u1D15" => "OU"
-
-# \u24aa  [PARENTHESIZED LATIN SMALL LETTER O]
-"\u24AA" => "(o)"
-
-# \u0153  [LATIN SMALL LIGATURE OE]
-"\u0153" => "oe"
-
-# \u1d14  [LATIN SMALL LETTER TURNED OE]
-"\u1D14" => "oe"
-
-# \ua74f  [LATIN SMALL LETTER OO]
-"\uA74F" => "oo"
-
-# \u0223  http://en.wikipedia.org/wiki/OU  [LATIN SMALL LETTER OU]
-"\u0223" => "ou"
-
-# \u01a4  [LATIN CAPITAL LETTER P WITH HOOK]
-"\u01A4" => "P"
-
-# \u1d18  [LATIN LETTER SMALL CAPITAL P]
-"\u1D18" => "P"
-
-# \u1e54  [LATIN CAPITAL LETTER P WITH ACUTE]
-"\u1E54" => "P"
-
-# \u1e56  [LATIN CAPITAL LETTER P WITH DOT ABOVE]
-"\u1E56" => "P"
-
-# \u24c5  [CIRCLED LATIN CAPITAL LETTER P]
-"\u24C5" => "P"
-
-# \u2c63  [LATIN CAPITAL LETTER P WITH STROKE]
-"\u2C63" => "P"
-
-# \ua750  [LATIN CAPITAL LETTER P WITH STROKE THROUGH DESCENDER]
-"\uA750" => "P"
-
-# \ua752  [LATIN CAPITAL LETTER P WITH FLOURISH]
-"\uA752" => "P"
-
-# \ua754  [LATIN CAPITAL LETTER P WITH SQUIRREL TAIL]
-"\uA754" => "P"
-
-# \uff30  [FULLWIDTH LATIN CAPITAL LETTER P]
-"\uFF30" => "P"
-
-# \u01a5  [LATIN SMALL LETTER P WITH HOOK]
-"\u01A5" => "p"
-
-# \u1d71  [LATIN SMALL LETTER P WITH MIDDLE TILDE]
-"\u1D71" => "p"
-
-# \u1d7d  [LATIN SMALL LETTER P WITH STROKE]
-"\u1D7D" => "p"
-
-# \u1d88  [LATIN SMALL LETTER P WITH PALATAL HOOK]
-"\u1D88" => "p"
-
-# \u1e55  [LATIN SMALL LETTER P WITH ACUTE]
-"\u1E55" => "p"
-
-# \u1e57  [LATIN SMALL LETTER P WITH DOT ABOVE]
-"\u1E57" => "p"
-
-# \u24df  [CIRCLED LATIN SMALL LETTER P]
-"\u24DF" => "p"
-
-# \ua751  [LATIN SMALL LETTER P WITH STROKE THROUGH DESCENDER]
-"\uA751" => "p"
-
-# \ua753  [LATIN SMALL LETTER P WITH FLOURISH]
-"\uA753" => "p"
-
-# \ua755  [LATIN SMALL LETTER P WITH SQUIRREL TAIL]
-"\uA755" => "p"
-
-# \ua7fc  [LATIN EPIGRAPHIC LETTER REVERSED P]
-"\uA7FC" => "p"
-
-# \uff50  [FULLWIDTH LATIN SMALL LETTER P]
-"\uFF50" => "p"
-
-# \u24ab  [PARENTHESIZED LATIN SMALL LETTER P]
-"\u24AB" => "(p)"
-
-# \u024a  [LATIN CAPITAL LETTER SMALL Q WITH HOOK TAIL]
-"\u024A" => "Q"
-
-# \u24c6  [CIRCLED LATIN CAPITAL LETTER Q]
-"\u24C6" => "Q"
-
-# \ua756  [LATIN CAPITAL LETTER Q WITH STROKE THROUGH DESCENDER]
-"\uA756" => "Q"
-
-# \ua758  [LATIN CAPITAL LETTER Q WITH DIAGONAL STROKE]
-"\uA758" => "Q"
-
-# \uff31  [FULLWIDTH LATIN CAPITAL LETTER Q]
-"\uFF31" => "Q"
-
-# \u0138  http://en.wikipedia.org/wiki/Kra_(letter)  [LATIN SMALL LETTER KRA]
-"\u0138" => "q"
-
-# \u024b  [LATIN SMALL LETTER Q WITH HOOK TAIL]
-"\u024B" => "q"
-
-# \u02a0  [LATIN SMALL LETTER Q WITH HOOK]
-"\u02A0" => "q"
-
-# \u24e0  [CIRCLED LATIN SMALL LETTER Q]
-"\u24E0" => "q"
-
-# \ua757  [LATIN SMALL LETTER Q WITH STROKE THROUGH DESCENDER]
-"\uA757" => "q"
-
-# \ua759  [LATIN SMALL LETTER Q WITH DIAGONAL STROKE]
-"\uA759" => "q"
-
-# \uff51  [FULLWIDTH LATIN SMALL LETTER Q]
-"\uFF51" => "q"
-
-# \u24ac  [PARENTHESIZED LATIN SMALL LETTER Q]
-"\u24AC" => "(q)"
-
-# \u0239  [LATIN SMALL LETTER QP DIGRAPH]
-"\u0239" => "qp"
-
-# \u0154  [LATIN CAPITAL LETTER R WITH ACUTE]
-"\u0154" => "R"
-
-# \u0156  [LATIN CAPITAL LETTER R WITH CEDILLA]
-"\u0156" => "R"
-
-# \u0158  [LATIN CAPITAL LETTER R WITH CARON]
-"\u0158" => "R"
-
-# \u0210  [LATIN CAPITAL LETTER R WITH DOUBLE GRAVE]
-"\u0210" => "R"
-
-# \u0212  [LATIN CAPITAL LETTER R WITH INVERTED BREVE]
-"\u0212" => "R"
-
-# \u024c  [LATIN CAPITAL LETTER R WITH STROKE]
-"\u024C" => "R"
-
-# \u0280  [LATIN LETTER SMALL CAPITAL R]
-"\u0280" => "R"
-
-# \u0281  [LATIN LETTER SMALL CAPITAL INVERTED R]
-"\u0281" => "R"
-
-# \u1d19  [LATIN LETTER SMALL CAPITAL REVERSED R]
-"\u1D19" => "R"
-
-# \u1d1a  [LATIN LETTER SMALL CAPITAL TURNED R]
-"\u1D1A" => "R"
-
-# \u1e58  [LATIN CAPITAL LETTER R WITH DOT ABOVE]
-"\u1E58" => "R"
-
-# \u1e5a  [LATIN CAPITAL LETTER R WITH DOT BELOW]
-"\u1E5A" => "R"
-
-# \u1e5c  [LATIN CAPITAL LETTER R WITH DOT BELOW AND MACRON]
-"\u1E5C" => "R"
-
-# \u1e5e  [LATIN CAPITAL LETTER R WITH LINE BELOW]
-"\u1E5E" => "R"
-
-# \u24c7  [CIRCLED LATIN CAPITAL LETTER R]
-"\u24C7" => "R"
-
-# \u2c64  [LATIN CAPITAL LETTER R WITH TAIL]
-"\u2C64" => "R"
-
-# \ua75a  [LATIN CAPITAL LETTER R ROTUNDA]
-"\uA75A" => "R"
-
-# \ua782  [LATIN CAPITAL LETTER INSULAR R]
-"\uA782" => "R"
-
-# \uff32  [FULLWIDTH LATIN CAPITAL LETTER R]
-"\uFF32" => "R"
-
-# \u0155  [LATIN SMALL LETTER R WITH ACUTE]
-"\u0155" => "r"
-
-# \u0157  [LATIN SMALL LETTER R WITH CEDILLA]
-"\u0157" => "r"
-
-# \u0159  [LATIN SMALL LETTER R WITH CARON]
-"\u0159" => "r"
-
-# \u0211  [LATIN SMALL LETTER R WITH DOUBLE GRAVE]
-"\u0211" => "r"
-
-# \u0213  [LATIN SMALL LETTER R WITH INVERTED BREVE]
-"\u0213" => "r"
-
-# \u024d  [LATIN SMALL LETTER R WITH STROKE]
-"\u024D" => "r"
-
-# \u027c  [LATIN SMALL LETTER R WITH LONG LEG]
-"\u027C" => "r"
-
-# \u027d  [LATIN SMALL LETTER R WITH TAIL]
-"\u027D" => "r"
-
-# \u027e  [LATIN SMALL LETTER R WITH FISHHOOK]
-"\u027E" => "r"
-
-# \u027f  [LATIN SMALL LETTER REVERSED R WITH FISHHOOK]
-"\u027F" => "r"
-
-# \u1d63  [LATIN SUBSCRIPT SMALL LETTER R]
-"\u1D63" => "r"
-
-# \u1d72  [LATIN SMALL LETTER R WITH MIDDLE TILDE]
-"\u1D72" => "r"
-
-# \u1d73  [LATIN SMALL LETTER R WITH FISHHOOK AND MIDDLE TILDE]
-"\u1D73" => "r"
-
-# \u1d89  [LATIN SMALL LETTER R WITH PALATAL HOOK]
-"\u1D89" => "r"
-
-# \u1e59  [LATIN SMALL LETTER R WITH DOT ABOVE]
-"\u1E59" => "r"
-
-# \u1e5b  [LATIN SMALL LETTER R WITH DOT BELOW]
-"\u1E5B" => "r"
-
-# \u1e5d  [LATIN SMALL LETTER R WITH DOT BELOW AND MACRON]
-"\u1E5D" => "r"
-
-# \u1e5f  [LATIN SMALL LETTER R WITH LINE BELOW]
-"\u1E5F" => "r"
-
-# \u24e1  [CIRCLED LATIN SMALL LETTER R]
-"\u24E1" => "r"
-
-# \ua75b  [LATIN SMALL LETTER R ROTUNDA]
-"\uA75B" => "r"
-
-# \ua783  [LATIN SMALL LETTER INSULAR R]
-"\uA783" => "r"
-
-# \uff52  [FULLWIDTH LATIN SMALL LETTER R]
-"\uFF52" => "r"
-
-# \u24ad  [PARENTHESIZED LATIN SMALL LETTER R]
-"\u24AD" => "(r)"
-
-# \u015a  [LATIN CAPITAL LETTER S WITH ACUTE]
-"\u015A" => "S"
-
-# \u015c  [LATIN CAPITAL LETTER S WITH CIRCUMFLEX]
-"\u015C" => "S"
-
-# \u015e  [LATIN CAPITAL LETTER S WITH CEDILLA]
-"\u015E" => "S"
-
-# \u0160  [LATIN CAPITAL LETTER S WITH CARON]
-"\u0160" => "S"
-
-# \u0218  [LATIN CAPITAL LETTER S WITH COMMA BELOW]
-"\u0218" => "S"
-
-# \u1e60  [LATIN CAPITAL LETTER S WITH DOT ABOVE]
-"\u1E60" => "S"
-
-# \u1e62  [LATIN CAPITAL LETTER S WITH DOT BELOW]
-"\u1E62" => "S"
-
-# \u1e64  [LATIN CAPITAL LETTER S WITH ACUTE AND DOT ABOVE]
-"\u1E64" => "S"
-
-# \u1e66  [LATIN CAPITAL LETTER S WITH CARON AND DOT ABOVE]
-"\u1E66" => "S"
-
-# \u1e68  [LATIN CAPITAL LETTER S WITH DOT BELOW AND DOT ABOVE]
-"\u1E68" => "S"
-
-# \u24c8  [CIRCLED LATIN CAPITAL LETTER S]
-"\u24C8" => "S"
-
-# \ua731  [LATIN LETTER SMALL CAPITAL S]
-"\uA731" => "S"
-
-# \ua785  [LATIN SMALL LETTER INSULAR S]
-"\uA785" => "S"
-
-# \uff33  [FULLWIDTH LATIN CAPITAL LETTER S]
-"\uFF33" => "S"
-
-# \u015b  [LATIN SMALL LETTER S WITH ACUTE]
-"\u015B" => "s"
-
-# \u015d  [LATIN SMALL LETTER S WITH CIRCUMFLEX]
-"\u015D" => "s"
-
-# \u015f  [LATIN SMALL LETTER S WITH CEDILLA]
-"\u015F" => "s"
-
-# \u0161  [LATIN SMALL LETTER S WITH CARON]
-"\u0161" => "s"
-
-# \u017f  http://en.wikipedia.org/wiki/Long_S  [LATIN SMALL LETTER LONG S]
-"\u017F" => "s"
-
-# \u0219  [LATIN SMALL LETTER S WITH COMMA BELOW]
-"\u0219" => "s"
-
-# \u023f  [LATIN SMALL LETTER S WITH SWASH TAIL]
-"\u023F" => "s"
-
-# \u0282  [LATIN SMALL LETTER S WITH HOOK]
-"\u0282" => "s"
-
-# \u1d74  [LATIN SMALL LETTER S WITH MIDDLE TILDE]
-"\u1D74" => "s"
-
-# \u1d8a  [LATIN SMALL LETTER S WITH PALATAL HOOK]
-"\u1D8A" => "s"
-
-# \u1e61  [LATIN SMALL LETTER S WITH DOT ABOVE]
-"\u1E61" => "s"
-
-# \u1e63  [LATIN SMALL LETTER S WITH DOT BELOW]
-"\u1E63" => "s"
-
-# \u1e65  [LATIN SMALL LETTER S WITH ACUTE AND DOT ABOVE]
-"\u1E65" => "s"
-
-# \u1e67  [LATIN SMALL LETTER S WITH CARON AND DOT ABOVE]
-"\u1E67" => "s"
-
-# \u1e69  [LATIN SMALL LETTER S WITH DOT BELOW AND DOT ABOVE]
-"\u1E69" => "s"
-
-# \u1e9c  [LATIN SMALL LETTER LONG S WITH DIAGONAL STROKE]
-"\u1E9C" => "s"
-
-# \u1e9d  [LATIN SMALL LETTER LONG S WITH HIGH STROKE]
-"\u1E9D" => "s"
-
-# \u24e2  [CIRCLED LATIN SMALL LETTER S]
-"\u24E2" => "s"
-
-# \ua784  [LATIN CAPITAL LETTER INSULAR S]
-"\uA784" => "s"
-
-# \uff53  [FULLWIDTH LATIN SMALL LETTER S]
-"\uFF53" => "s"
-
-# \u1e9e  [LATIN CAPITAL LETTER SHARP S]
-"\u1E9E" => "SS"
-
-# \u24ae  [PARENTHESIZED LATIN SMALL LETTER S]
-"\u24AE" => "(s)"
-
-# ß  [LATIN SMALL LETTER SHARP S]
-"\u00DF" => "ss"
-
-# \ufb06  [LATIN SMALL LIGATURE ST]
-"\uFB06" => "st"
-
-# \u0162  [LATIN CAPITAL LETTER T WITH CEDILLA]
-"\u0162" => "T"
-
-# \u0164  [LATIN CAPITAL LETTER T WITH CARON]
-"\u0164" => "T"
-
-# \u0166  [LATIN CAPITAL LETTER T WITH STROKE]
-"\u0166" => "T"
-
-# \u01ac  [LATIN CAPITAL LETTER T WITH HOOK]
-"\u01AC" => "T"
-
-# \u01ae  [LATIN CAPITAL LETTER T WITH RETROFLEX HOOK]
-"\u01AE" => "T"
-
-# \u021a  [LATIN CAPITAL LETTER T WITH COMMA BELOW]
-"\u021A" => "T"
-
-# \u023e  [LATIN CAPITAL LETTER T WITH DIAGONAL STROKE]
-"\u023E" => "T"
-
-# \u1d1b  [LATIN LETTER SMALL CAPITAL T]
-"\u1D1B" => "T"
-
-# \u1e6a  [LATIN CAPITAL LETTER T WITH DOT ABOVE]
-"\u1E6A" => "T"
-
-# \u1e6c  [LATIN CAPITAL LETTER T WITH DOT BELOW]
-"\u1E6C" => "T"
-
-# \u1e6e  [LATIN CAPITAL LETTER T WITH LINE BELOW]
-"\u1E6E" => "T"
-
-# \u1e70  [LATIN CAPITAL LETTER T WITH CIRCUMFLEX BELOW]
-"\u1E70" => "T"
-
-# \u24c9  [CIRCLED LATIN CAPITAL LETTER T]
-"\u24C9" => "T"
-
-# \ua786  [LATIN CAPITAL LETTER INSULAR T]
-"\uA786" => "T"
-
-# \uff34  [FULLWIDTH LATIN CAPITAL LETTER T]
-"\uFF34" => "T"
-
-# \u0163  [LATIN SMALL LETTER T WITH CEDILLA]
-"\u0163" => "t"
-
-# \u0165  [LATIN SMALL LETTER T WITH CARON]
-"\u0165" => "t"
-
-# \u0167  [LATIN SMALL LETTER T WITH STROKE]
-"\u0167" => "t"
-
-# \u01ab  [LATIN SMALL LETTER T WITH PALATAL HOOK]
-"\u01AB" => "t"
-
-# \u01ad  [LATIN SMALL LETTER T WITH HOOK]
-"\u01AD" => "t"
-
-# \u021b  [LATIN SMALL LETTER T WITH COMMA BELOW]
-"\u021B" => "t"
-
-# \u0236  [LATIN SMALL LETTER T WITH CURL]
-"\u0236" => "t"
-
-# \u0287  [LATIN SMALL LETTER TURNED T]
-"\u0287" => "t"
-
-# \u0288  [LATIN SMALL LETTER T WITH RETROFLEX HOOK]
-"\u0288" => "t"
-
-# \u1d75  [LATIN SMALL LETTER T WITH MIDDLE TILDE]
-"\u1D75" => "t"
-
-# \u1e6b  [LATIN SMALL LETTER T WITH DOT ABOVE]
-"\u1E6B" => "t"
-
-# \u1e6d  [LATIN SMALL LETTER T WITH DOT BELOW]
-"\u1E6D" => "t"
-
-# \u1e6f  [LATIN SMALL LETTER T WITH LINE BELOW]
-"\u1E6F" => "t"
-
-# \u1e71  [LATIN SMALL LETTER T WITH CIRCUMFLEX BELOW]
-"\u1E71" => "t"
-
-# \u1e97  [LATIN SMALL LETTER T WITH DIAERESIS]
-"\u1E97" => "t"
-
-# \u24e3  [CIRCLED LATIN SMALL LETTER T]
-"\u24E3" => "t"
-
-# \u2c66  [LATIN SMALL LETTER T WITH DIAGONAL STROKE]
-"\u2C66" => "t"
-
-# \uff54  [FULLWIDTH LATIN SMALL LETTER T]
-"\uFF54" => "t"
-
-# Þ  [LATIN CAPITAL LETTER THORN]
-"\u00DE" => "TH"
-
-# \ua766  [LATIN CAPITAL LETTER THORN WITH STROKE THROUGH DESCENDER]
-"\uA766" => "TH"
-
-# \ua728  [LATIN CAPITAL LETTER TZ]
-"\uA728" => "TZ"
-
-# \u24af  [PARENTHESIZED LATIN SMALL LETTER T]
-"\u24AF" => "(t)"
-
-# \u02a8  [LATIN SMALL LETTER TC DIGRAPH WITH CURL]
-"\u02A8" => "tc"
-
-# þ  [LATIN SMALL LETTER THORN]
-"\u00FE" => "th"
-
-# \u1d7a  [LATIN SMALL LETTER TH WITH STRIKETHROUGH]
-"\u1D7A" => "th"
-
-# \ua767  [LATIN SMALL LETTER THORN WITH STROKE THROUGH DESCENDER]
-"\uA767" => "th"
-
-# \u02a6  [LATIN SMALL LETTER TS DIGRAPH]
-"\u02A6" => "ts"
-
-# \ua729  [LATIN SMALL LETTER TZ]
-"\uA729" => "tz"
-
-# Ù  [LATIN CAPITAL LETTER U WITH GRAVE]
-"\u00D9" => "U"
-
-# Ú  [LATIN CAPITAL LETTER U WITH ACUTE]
-"\u00DA" => "U"
-
-# Û  [LATIN CAPITAL LETTER U WITH CIRCUMFLEX]
-"\u00DB" => "U"
-
-# Ü  [LATIN CAPITAL LETTER U WITH DIAERESIS]
-"\u00DC" => "U"
-
-# \u0168  [LATIN CAPITAL LETTER U WITH TILDE]
-"\u0168" => "U"
-
-# \u016a  [LATIN CAPITAL LETTER U WITH MACRON]
-"\u016A" => "U"
-
-# \u016c  [LATIN CAPITAL LETTER U WITH BREVE]
-"\u016C" => "U"
-
-# \u016e  [LATIN CAPITAL LETTER U WITH RING ABOVE]
-"\u016E" => "U"
-
-# \u0170  [LATIN CAPITAL LETTER U WITH DOUBLE ACUTE]
-"\u0170" => "U"
-
-# \u0172  [LATIN CAPITAL LETTER U WITH OGONEK]
-"\u0172" => "U"
-
-# \u01af  [LATIN CAPITAL LETTER U WITH HORN]
-"\u01AF" => "U"
-
-# \u01d3  [LATIN CAPITAL LETTER U WITH CARON]
-"\u01D3" => "U"
-
-# \u01d5  [LATIN CAPITAL LETTER U WITH DIAERESIS AND MACRON]
-"\u01D5" => "U"
-
-# \u01d7  [LATIN CAPITAL LETTER U WITH DIAERESIS AND ACUTE]
-"\u01D7" => "U"
-
-# \u01d9  [LATIN CAPITAL LETTER U WITH DIAERESIS AND CARON]
-"\u01D9" => "U"
-
-# \u01db  [LATIN CAPITAL LETTER U WITH DIAERESIS AND GRAVE]
-"\u01DB" => "U"
-
-# \u0214  [LATIN CAPITAL LETTER U WITH DOUBLE GRAVE]
-"\u0214" => "U"
-
-# \u0216  [LATIN CAPITAL LETTER U WITH INVERTED BREVE]
-"\u0216" => "U"
-
-# \u0244  [LATIN CAPITAL LETTER U BAR]
-"\u0244" => "U"
-
-# \u1d1c  [LATIN LETTER SMALL CAPITAL U]
-"\u1D1C" => "U"
-
-# \u1d7e  [LATIN SMALL CAPITAL LETTER U WITH STROKE]
-"\u1D7E" => "U"
-
-# \u1e72  [LATIN CAPITAL LETTER U WITH DIAERESIS BELOW]
-"\u1E72" => "U"
-
-# \u1e74  [LATIN CAPITAL LETTER U WITH TILDE BELOW]
-"\u1E74" => "U"
-
-# \u1e76  [LATIN CAPITAL LETTER U WITH CIRCUMFLEX BELOW]
-"\u1E76" => "U"
-
-# \u1e78  [LATIN CAPITAL LETTER U WITH TILDE AND ACUTE]
-"\u1E78" => "U"
-
-# \u1e7a  [LATIN CAPITAL LETTER U WITH MACRON AND DIAERESIS]
-"\u1E7A" => "U"
-
-# \u1ee4  [LATIN CAPITAL LETTER U WITH DOT BELOW]
-"\u1EE4" => "U"
-
-# \u1ee6  [LATIN CAPITAL LETTER U WITH HOOK ABOVE]
-"\u1EE6" => "U"
-
-# \u1ee8  [LATIN CAPITAL LETTER U WITH HORN AND ACUTE]
-"\u1EE8" => "U"
-
-# \u1eea  [LATIN CAPITAL LETTER U WITH HORN AND GRAVE]
-"\u1EEA" => "U"
-
-# \u1eec  [LATIN CAPITAL LETTER U WITH HORN AND HOOK ABOVE]
-"\u1EEC" => "U"
-
-# \u1eee  [LATIN CAPITAL LETTER U WITH HORN AND TILDE]
-"\u1EEE" => "U"
-
-# \u1ef0  [LATIN CAPITAL LETTER U WITH HORN AND DOT BELOW]
-"\u1EF0" => "U"
-
-# \u24ca  [CIRCLED LATIN CAPITAL LETTER U]
-"\u24CA" => "U"
-
-# \uff35  [FULLWIDTH LATIN CAPITAL LETTER U]
-"\uFF35" => "U"
-
-# ù  [LATIN SMALL LETTER U WITH GRAVE]
-"\u00F9" => "u"
-
-# ú  [LATIN SMALL LETTER U WITH ACUTE]
-"\u00FA" => "u"
-
-# û  [LATIN SMALL LETTER U WITH CIRCUMFLEX]
-"\u00FB" => "u"
-
-# ü  [LATIN SMALL LETTER U WITH DIAERESIS]
-"\u00FC" => "u"
-
-# \u0169  [LATIN SMALL LETTER U WITH TILDE]
-"\u0169" => "u"
-
-# \u016b  [LATIN SMALL LETTER U WITH MACRON]
-"\u016B" => "u"
-
-# \u016d  [LATIN SMALL LETTER U WITH BREVE]
-"\u016D" => "u"
-
-# \u016f  [LATIN SMALL LETTER U WITH RING ABOVE]
-"\u016F" => "u"
-
-# \u0171  [LATIN SMALL LETTER U WITH DOUBLE ACUTE]
-"\u0171" => "u"
-
-# \u0173  [LATIN SMALL LETTER U WITH OGONEK]
-"\u0173" => "u"
-
-# \u01b0  [LATIN SMALL LETTER U WITH HORN]
-"\u01B0" => "u"
-
-# \u01d4  [LATIN SMALL LETTER U WITH CARON]
-"\u01D4" => "u"
-
-# \u01d6  [LATIN SMALL LETTER U WITH DIAERESIS AND MACRON]
-"\u01D6" => "u"
-
-# \u01d8  [LATIN SMALL LETTER U WITH DIAERESIS AND ACUTE]
-"\u01D8" => "u"
-
-# \u01da  [LATIN SMALL LETTER U WITH DIAERESIS AND CARON]
-"\u01DA" => "u"
-
-# \u01dc  [LATIN SMALL LETTER U WITH DIAERESIS AND GRAVE]
-"\u01DC" => "u"
-
-# \u0215  [LATIN SMALL LETTER U WITH DOUBLE GRAVE]
-"\u0215" => "u"
-
-# \u0217  [LATIN SMALL LETTER U WITH INVERTED BREVE]
-"\u0217" => "u"
-
-# \u0289  [LATIN SMALL LETTER U BAR]
-"\u0289" => "u"
-
-# \u1d64  [LATIN SUBSCRIPT SMALL LETTER U]
-"\u1D64" => "u"
-
-# \u1d99  [LATIN SMALL LETTER U WITH RETROFLEX HOOK]
-"\u1D99" => "u"
-
-# \u1e73  [LATIN SMALL LETTER U WITH DIAERESIS BELOW]
-"\u1E73" => "u"
-
-# \u1e75  [LATIN SMALL LETTER U WITH TILDE BELOW]
-"\u1E75" => "u"
-
-# \u1e77  [LATIN SMALL LETTER U WITH CIRCUMFLEX BELOW]
-"\u1E77" => "u"
-
-# \u1e79  [LATIN SMALL LETTER U WITH TILDE AND ACUTE]
-"\u1E79" => "u"
-
-# \u1e7b  [LATIN SMALL LETTER U WITH MACRON AND DIAERESIS]
-"\u1E7B" => "u"
-
-# \u1ee5  [LATIN SMALL LETTER U WITH DOT BELOW]
-"\u1EE5" => "u"
-
-# \u1ee7  [LATIN SMALL LETTER U WITH HOOK ABOVE]
-"\u1EE7" => "u"
-
-# \u1ee9  [LATIN SMALL LETTER U WITH HORN AND ACUTE]
-"\u1EE9" => "u"
-
-# \u1eeb  [LATIN SMALL LETTER U WITH HORN AND GRAVE]
-"\u1EEB" => "u"
-
-# \u1eed  [LATIN SMALL LETTER U WITH HORN AND HOOK ABOVE]
-"\u1EED" => "u"
-
-# \u1eef  [LATIN SMALL LETTER U WITH HORN AND TILDE]
-"\u1EEF" => "u"
-
-# \u1ef1  [LATIN SMALL LETTER U WITH HORN AND DOT BELOW]
-"\u1EF1" => "u"
-
-# \u24e4  [CIRCLED LATIN SMALL LETTER U]
-"\u24E4" => "u"
-
-# \uff55  [FULLWIDTH LATIN SMALL LETTER U]
-"\uFF55" => "u"
-
-# \u24b0  [PARENTHESIZED LATIN SMALL LETTER U]
-"\u24B0" => "(u)"
-
-# \u1d6b  [LATIN SMALL LETTER UE]
-"\u1D6B" => "ue"
-
-# \u01b2  [LATIN CAPITAL LETTER V WITH HOOK]
-"\u01B2" => "V"
-
-# \u0245  [LATIN CAPITAL LETTER TURNED V]
-"\u0245" => "V"
-
-# \u1d20  [LATIN LETTER SMALL CAPITAL V]
-"\u1D20" => "V"
-
-# \u1e7c  [LATIN CAPITAL LETTER V WITH TILDE]
-"\u1E7C" => "V"
-
-# \u1e7e  [LATIN CAPITAL LETTER V WITH DOT BELOW]
-"\u1E7E" => "V"
-
-# \u1efc  [LATIN CAPITAL LETTER MIDDLE-WELSH V]
-"\u1EFC" => "V"
-
-# \u24cb  [CIRCLED LATIN CAPITAL LETTER V]
-"\u24CB" => "V"
-
-# \ua75e  [LATIN CAPITAL LETTER V WITH DIAGONAL STROKE]
-"\uA75E" => "V"
-
-# \ua768  [LATIN CAPITAL LETTER VEND]
-"\uA768" => "V"
-
-# \uff36  [FULLWIDTH LATIN CAPITAL LETTER V]
-"\uFF36" => "V"
-
-# \u028b  [LATIN SMALL LETTER V WITH HOOK]
-"\u028B" => "v"
-
-# \u028c  [LATIN SMALL LETTER TURNED V]
-"\u028C" => "v"
-
-# \u1d65  [LATIN SUBSCRIPT SMALL LETTER V]
-"\u1D65" => "v"
-
-# \u1d8c  [LATIN SMALL LETTER V WITH PALATAL HOOK]
-"\u1D8C" => "v"
-
-# \u1e7d  [LATIN SMALL LETTER V WITH TILDE]
-"\u1E7D" => "v"
-
-# \u1e7f  [LATIN SMALL LETTER V WITH DOT BELOW]
-"\u1E7F" => "v"
-
-# \u24e5  [CIRCLED LATIN SMALL LETTER V]
-"\u24E5" => "v"
-
-# \u2c71  [LATIN SMALL LETTER V WITH RIGHT HOOK]
-"\u2C71" => "v"
-
-# \u2c74  [LATIN SMALL LETTER V WITH CURL]
-"\u2C74" => "v"
-
-# \ua75f  [LATIN SMALL LETTER V WITH DIAGONAL STROKE]
-"\uA75F" => "v"
-
-# \uff56  [FULLWIDTH LATIN SMALL LETTER V]
-"\uFF56" => "v"
-
-# \ua760  [LATIN CAPITAL LETTER VY]
-"\uA760" => "VY"
-
-# \u24b1  [PARENTHESIZED LATIN SMALL LETTER V]
-"\u24B1" => "(v)"
-
-# \ua761  [LATIN SMALL LETTER VY]
-"\uA761" => "vy"
-
-# \u0174  [LATIN CAPITAL LETTER W WITH CIRCUMFLEX]
-"\u0174" => "W"
-
-# \u01f7  http://en.wikipedia.org/wiki/Wynn  [LATIN CAPITAL LETTER WYNN]
-"\u01F7" => "W"
-
-# \u1d21  [LATIN LETTER SMALL CAPITAL W]
-"\u1D21" => "W"
-
-# \u1e80  [LATIN CAPITAL LETTER W WITH GRAVE]
-"\u1E80" => "W"
-
-# \u1e82  [LATIN CAPITAL LETTER W WITH ACUTE]
-"\u1E82" => "W"
-
-# \u1e84  [LATIN CAPITAL LETTER W WITH DIAERESIS]
-"\u1E84" => "W"
-
-# \u1e86  [LATIN CAPITAL LETTER W WITH DOT ABOVE]
-"\u1E86" => "W"
-
-# \u1e88  [LATIN CAPITAL LETTER W WITH DOT BELOW]
-"\u1E88" => "W"
-
-# \u24cc  [CIRCLED LATIN CAPITAL LETTER W]
-"\u24CC" => "W"
-
-# \u2c72  [LATIN CAPITAL LETTER W WITH HOOK]
-"\u2C72" => "W"
-
-# \uff37  [FULLWIDTH LATIN CAPITAL LETTER W]
-"\uFF37" => "W"
-
-# \u0175  [LATIN SMALL LETTER W WITH CIRCUMFLEX]
-"\u0175" => "w"
-
-# \u01bf  http://en.wikipedia.org/wiki/Wynn  [LATIN LETTER WYNN]
-"\u01BF" => "w"
-
-# \u028d  [LATIN SMALL LETTER TURNED W]
-"\u028D" => "w"
-
-# \u1e81  [LATIN SMALL LETTER W WITH GRAVE]
-"\u1E81" => "w"
-
-# \u1e83  [LATIN SMALL LETTER W WITH ACUTE]
-"\u1E83" => "w"
-
-# \u1e85  [LATIN SMALL LETTER W WITH DIAERESIS]
-"\u1E85" => "w"
-
-# \u1e87  [LATIN SMALL LETTER W WITH DOT ABOVE]
-"\u1E87" => "w"
-
-# \u1e89  [LATIN SMALL LETTER W WITH DOT BELOW]
-"\u1E89" => "w"
-
-# \u1e98  [LATIN SMALL LETTER W WITH RING ABOVE]
-"\u1E98" => "w"
-
-# \u24e6  [CIRCLED LATIN SMALL LETTER W]
-"\u24E6" => "w"
-
-# \u2c73  [LATIN SMALL LETTER W WITH HOOK]
-"\u2C73" => "w"
-
-# \uff57  [FULLWIDTH LATIN SMALL LETTER W]
-"\uFF57" => "w"
-
-# \u24b2  [PARENTHESIZED LATIN SMALL LETTER W]
-"\u24B2" => "(w)"
-
-# \u1e8a  [LATIN CAPITAL LETTER X WITH DOT ABOVE]
-"\u1E8A" => "X"
-
-# \u1e8c  [LATIN CAPITAL LETTER X WITH DIAERESIS]
-"\u1E8C" => "X"
-
-# \u24cd  [CIRCLED LATIN CAPITAL LETTER X]
-"\u24CD" => "X"
-
-# \uff38  [FULLWIDTH LATIN CAPITAL LETTER X]
-"\uFF38" => "X"
-
-# \u1d8d  [LATIN SMALL LETTER X WITH PALATAL HOOK]
-"\u1D8D" => "x"
-
-# \u1e8b  [LATIN SMALL LETTER X WITH DOT ABOVE]
-"\u1E8B" => "x"
-
-# \u1e8d  [LATIN SMALL LETTER X WITH DIAERESIS]
-"\u1E8D" => "x"
-
-# \u2093  [LATIN SUBSCRIPT SMALL LETTER X]
-"\u2093" => "x"
-
-# \u24e7  [CIRCLED LATIN SMALL LETTER X]
-"\u24E7" => "x"
-
-# \uff58  [FULLWIDTH LATIN SMALL LETTER X]
-"\uFF58" => "x"
-
-# \u24b3  [PARENTHESIZED LATIN SMALL LETTER X]
-"\u24B3" => "(x)"
-
-# �  [LATIN CAPITAL LETTER Y WITH ACUTE]
-"\u00DD" => "Y"
-
-# \u0176  [LATIN CAPITAL LETTER Y WITH CIRCUMFLEX]
-"\u0176" => "Y"
-
-# \u0178  [LATIN CAPITAL LETTER Y WITH DIAERESIS]
-"\u0178" => "Y"
-
-# \u01b3  [LATIN CAPITAL LETTER Y WITH HOOK]
-"\u01B3" => "Y"
-
-# \u0232  [LATIN CAPITAL LETTER Y WITH MACRON]
-"\u0232" => "Y"
-
-# \u024e  [LATIN CAPITAL LETTER Y WITH STROKE]
-"\u024E" => "Y"
-
-# \u028f  [LATIN LETTER SMALL CAPITAL Y]
-"\u028F" => "Y"
-
-# \u1e8e  [LATIN CAPITAL LETTER Y WITH DOT ABOVE]
-"\u1E8E" => "Y"
-
-# \u1ef2  [LATIN CAPITAL LETTER Y WITH GRAVE]
-"\u1EF2" => "Y"
-
-# \u1ef4  [LATIN CAPITAL LETTER Y WITH DOT BELOW]
-"\u1EF4" => "Y"
-
-# \u1ef6  [LATIN CAPITAL LETTER Y WITH HOOK ABOVE]
-"\u1EF6" => "Y"
-
-# \u1ef8  [LATIN CAPITAL LETTER Y WITH TILDE]
-"\u1EF8" => "Y"
-
-# \u1efe  [LATIN CAPITAL LETTER Y WITH LOOP]
-"\u1EFE" => "Y"
-
-# \u24ce  [CIRCLED LATIN CAPITAL LETTER Y]
-"\u24CE" => "Y"
-
-# \uff39  [FULLWIDTH LATIN CAPITAL LETTER Y]
-"\uFF39" => "Y"
-
-# �  [LATIN SMALL LETTER Y WITH ACUTE]
-"\u00FD" => "y"
-
-# �  [LATIN SMALL LETTER Y WITH DIAERESIS]
-"\u00FF" => "y"
-
-# \u0177  [LATIN SMALL LETTER Y WITH CIRCUMFLEX]
-"\u0177" => "y"
-
-# \u01b4  [LATIN SMALL LETTER Y WITH HOOK]
-"\u01B4" => "y"
-
-# \u0233  [LATIN SMALL LETTER Y WITH MACRON]
-"\u0233" => "y"
-
-# \u024f  [LATIN SMALL LETTER Y WITH STROKE]
-"\u024F" => "y"
-
-# \u028e  [LATIN SMALL LETTER TURNED Y]
-"\u028E" => "y"
-
-# \u1e8f  [LATIN SMALL LETTER Y WITH DOT ABOVE]
-"\u1E8F" => "y"
-
-# \u1e99  [LATIN SMALL LETTER Y WITH RING ABOVE]
-"\u1E99" => "y"
-
-# \u1ef3  [LATIN SMALL LETTER Y WITH GRAVE]
-"\u1EF3" => "y"
-
-# \u1ef5  [LATIN SMALL LETTER Y WITH DOT BELOW]
-"\u1EF5" => "y"
-
-# \u1ef7  [LATIN SMALL LETTER Y WITH HOOK ABOVE]
-"\u1EF7" => "y"
-
-# \u1ef9  [LATIN SMALL LETTER Y WITH TILDE]
-"\u1EF9" => "y"
-
-# \u1eff  [LATIN SMALL LETTER Y WITH LOOP]
-"\u1EFF" => "y"
-
-# \u24e8  [CIRCLED LATIN SMALL LETTER Y]
-"\u24E8" => "y"
-
-# \uff59  [FULLWIDTH LATIN SMALL LETTER Y]
-"\uFF59" => "y"
-
-# \u24b4  [PARENTHESIZED LATIN SMALL LETTER Y]
-"\u24B4" => "(y)"
-
-# \u0179  [LATIN CAPITAL LETTER Z WITH ACUTE]
-"\u0179" => "Z"
-
-# \u017b  [LATIN CAPITAL LETTER Z WITH DOT ABOVE]
-"\u017B" => "Z"
-
-# \u017d  [LATIN CAPITAL LETTER Z WITH CARON]
-"\u017D" => "Z"
-
-# \u01b5  [LATIN CAPITAL LETTER Z WITH STROKE]
-"\u01B5" => "Z"
-
-# \u021c  http://en.wikipedia.org/wiki/Yogh  [LATIN CAPITAL LETTER YOGH]
-"\u021C" => "Z"
-
-# \u0224  [LATIN CAPITAL LETTER Z WITH HOOK]
-"\u0224" => "Z"
-
-# \u1d22  [LATIN LETTER SMALL CAPITAL Z]
-"\u1D22" => "Z"
-
-# \u1e90  [LATIN CAPITAL LETTER Z WITH CIRCUMFLEX]
-"\u1E90" => "Z"
-
-# \u1e92  [LATIN CAPITAL LETTER Z WITH DOT BELOW]
-"\u1E92" => "Z"
-
-# \u1e94  [LATIN CAPITAL LETTER Z WITH LINE BELOW]
-"\u1E94" => "Z"
-
-# \u24cf  [CIRCLED LATIN CAPITAL LETTER Z]
-"\u24CF" => "Z"
-
-# \u2c6b  [LATIN CAPITAL LETTER Z WITH DESCENDER]
-"\u2C6B" => "Z"
-
-# \ua762  [LATIN CAPITAL LETTER VISIGOTHIC Z]
-"\uA762" => "Z"
-
-# \uff3a  [FULLWIDTH LATIN CAPITAL LETTER Z]
-"\uFF3A" => "Z"
-
-# \u017a  [LATIN SMALL LETTER Z WITH ACUTE]
-"\u017A" => "z"
-
-# \u017c  [LATIN SMALL LETTER Z WITH DOT ABOVE]
-"\u017C" => "z"
-
-# \u017e  [LATIN SMALL LETTER Z WITH CARON]
-"\u017E" => "z"
-
-# \u01b6  [LATIN SMALL LETTER Z WITH STROKE]
-"\u01B6" => "z"
-
-# \u021d  http://en.wikipedia.org/wiki/Yogh  [LATIN SMALL LETTER YOGH]
-"\u021D" => "z"
-
-# \u0225  [LATIN SMALL LETTER Z WITH HOOK]
-"\u0225" => "z"
-
-# \u0240  [LATIN SMALL LETTER Z WITH SWASH TAIL]
-"\u0240" => "z"
-
-# \u0290  [LATIN SMALL LETTER Z WITH RETROFLEX HOOK]
-"\u0290" => "z"
-
-# \u0291  [LATIN SMALL LETTER Z WITH CURL]
-"\u0291" => "z"
-
-# \u1d76  [LATIN SMALL LETTER Z WITH MIDDLE TILDE]
-"\u1D76" => "z"
-
-# \u1d8e  [LATIN SMALL LETTER Z WITH PALATAL HOOK]
-"\u1D8E" => "z"
-
-# \u1e91  [LATIN SMALL LETTER Z WITH CIRCUMFLEX]
-"\u1E91" => "z"
-
-# \u1e93  [LATIN SMALL LETTER Z WITH DOT BELOW]
-"\u1E93" => "z"
-
-# \u1e95  [LATIN SMALL LETTER Z WITH LINE BELOW]
-"\u1E95" => "z"
-
-# \u24e9  [CIRCLED LATIN SMALL LETTER Z]
-"\u24E9" => "z"
-
-# \u2c6c  [LATIN SMALL LETTER Z WITH DESCENDER]
-"\u2C6C" => "z"
-
-# \ua763  [LATIN SMALL LETTER VISIGOTHIC Z]
-"\uA763" => "z"
-
-# \uff5a  [FULLWIDTH LATIN SMALL LETTER Z]
-"\uFF5A" => "z"
-
-# \u24b5  [PARENTHESIZED LATIN SMALL LETTER Z]
-"\u24B5" => "(z)"
-
-# \u2070  [SUPERSCRIPT ZERO]
-"\u2070" => "0"
-
-# \u2080  [SUBSCRIPT ZERO]
-"\u2080" => "0"
-
-# \u24ea  [CIRCLED DIGIT ZERO]
-"\u24EA" => "0"
-
-# \u24ff  [NEGATIVE CIRCLED DIGIT ZERO]
-"\u24FF" => "0"
-
-# \uff10  [FULLWIDTH DIGIT ZERO]
-"\uFF10" => "0"
-
-# �  [SUPERSCRIPT ONE]
-"\u00B9" => "1"
-
-# \u2081  [SUBSCRIPT ONE]
-"\u2081" => "1"
-
-# \u2460  [CIRCLED DIGIT ONE]
-"\u2460" => "1"
-
-# \u24f5  [DOUBLE CIRCLED DIGIT ONE]
-"\u24F5" => "1"
-
-# \u2776  [DINGBAT NEGATIVE CIRCLED DIGIT ONE]
-"\u2776" => "1"
-
-# \u2780  [DINGBAT CIRCLED SANS-SERIF DIGIT ONE]
-"\u2780" => "1"
-
-# \u278a  [DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT ONE]
-"\u278A" => "1"
-
-# \uff11  [FULLWIDTH DIGIT ONE]
-"\uFF11" => "1"
-
-# \u2488  [DIGIT ONE FULL STOP]
-"\u2488" => "1."
-
-# \u2474  [PARENTHESIZED DIGIT ONE]
-"\u2474" => "(1)"
-
-# �  [SUPERSCRIPT TWO]
-"\u00B2" => "2"
-
-# \u2082  [SUBSCRIPT TWO]
-"\u2082" => "2"
-
-# \u2461  [CIRCLED DIGIT TWO]
-"\u2461" => "2"
-
-# \u24f6  [DOUBLE CIRCLED DIGIT TWO]
-"\u24F6" => "2"
-
-# \u2777  [DINGBAT NEGATIVE CIRCLED DIGIT TWO]
-"\u2777" => "2"
-
-# \u2781  [DINGBAT CIRCLED SANS-SERIF DIGIT TWO]
-"\u2781" => "2"
-
-# \u278b  [DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT TWO]
-"\u278B" => "2"
-
-# \uff12  [FULLWIDTH DIGIT TWO]
-"\uFF12" => "2"
-
-# \u2489  [DIGIT TWO FULL STOP]
-"\u2489" => "2."
-
-# \u2475  [PARENTHESIZED DIGIT TWO]
-"\u2475" => "(2)"
-
-# �  [SUPERSCRIPT THREE]
-"\u00B3" => "3"
-
-# \u2083  [SUBSCRIPT THREE]
-"\u2083" => "3"
-
-# \u2462  [CIRCLED DIGIT THREE]
-"\u2462" => "3"
-
-# \u24f7  [DOUBLE CIRCLED DIGIT THREE]
-"\u24F7" => "3"
-
-# \u2778  [DINGBAT NEGATIVE CIRCLED DIGIT THREE]
-"\u2778" => "3"
-
-# \u2782  [DINGBAT CIRCLED SANS-SERIF DIGIT THREE]
-"\u2782" => "3"
-
-# \u278c  [DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT THREE]
-"\u278C" => "3"
-
-# \uff13  [FULLWIDTH DIGIT THREE]
-"\uFF13" => "3"
-
-# \u248a  [DIGIT THREE FULL STOP]
-"\u248A" => "3."
-
-# \u2476  [PARENTHESIZED DIGIT THREE]
-"\u2476" => "(3)"
-
-# \u2074  [SUPERSCRIPT FOUR]
-"\u2074" => "4"
-
-# \u2084  [SUBSCRIPT FOUR]
-"\u2084" => "4"
-
-# \u2463  [CIRCLED DIGIT FOUR]
-"\u2463" => "4"
-
-# \u24f8  [DOUBLE CIRCLED DIGIT FOUR]
-"\u24F8" => "4"
-
-# \u2779  [DINGBAT NEGATIVE CIRCLED DIGIT FOUR]
-"\u2779" => "4"
-
-# \u2783  [DINGBAT CIRCLED SANS-SERIF DIGIT FOUR]
-"\u2783" => "4"
-
-# \u278d  [DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT FOUR]
-"\u278D" => "4"
-
-# \uff14  [FULLWIDTH DIGIT FOUR]
-"\uFF14" => "4"
-
-# \u248b  [DIGIT FOUR FULL STOP]
-"\u248B" => "4."
-
-# \u2477  [PARENTHESIZED DIGIT FOUR]
-"\u2477" => "(4)"
-
-# \u2075  [SUPERSCRIPT FIVE]
-"\u2075" => "5"
-
-# \u2085  [SUBSCRIPT FIVE]
-"\u2085" => "5"
-
-# \u2464  [CIRCLED DIGIT FIVE]
-"\u2464" => "5"
-
-# \u24f9  [DOUBLE CIRCLED DIGIT FIVE]
-"\u24F9" => "5"
-
-# \u277a  [DINGBAT NEGATIVE CIRCLED DIGIT FIVE]
-"\u277A" => "5"
-
-# \u2784  [DINGBAT CIRCLED SANS-SERIF DIGIT FIVE]
-"\u2784" => "5"
-
-# \u278e  [DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT FIVE]
-"\u278E" => "5"
-
-# \uff15  [FULLWIDTH DIGIT FIVE]
-"\uFF15" => "5"
-
-# \u248c  [DIGIT FIVE FULL STOP]
-"\u248C" => "5."
-
-# \u2478  [PARENTHESIZED DIGIT FIVE]
-"\u2478" => "(5)"
-
-# \u2076  [SUPERSCRIPT SIX]
-"\u2076" => "6"
-
-# \u2086  [SUBSCRIPT SIX]
-"\u2086" => "6"
-
-# \u2465  [CIRCLED DIGIT SIX]
-"\u2465" => "6"
-
-# \u24fa  [DOUBLE CIRCLED DIGIT SIX]
-"\u24FA" => "6"
-
-# \u277b  [DINGBAT NEGATIVE CIRCLED DIGIT SIX]
-"\u277B" => "6"
-
-# \u2785  [DINGBAT CIRCLED SANS-SERIF DIGIT SIX]
-"\u2785" => "6"
-
-# \u278f  [DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT SIX]
-"\u278F" => "6"
-
-# \uff16  [FULLWIDTH DIGIT SIX]
-"\uFF16" => "6"
-
-# \u248d  [DIGIT SIX FULL STOP]
-"\u248D" => "6."
-
-# \u2479  [PARENTHESIZED DIGIT SIX]
-"\u2479" => "(6)"
-
-# \u2077  [SUPERSCRIPT SEVEN]
-"\u2077" => "7"
-
-# \u2087  [SUBSCRIPT SEVEN]
-"\u2087" => "7"
-
-# \u2466  [CIRCLED DIGIT SEVEN]
-"\u2466" => "7"
-
-# \u24fb  [DOUBLE CIRCLED DIGIT SEVEN]
-"\u24FB" => "7"
-
-# \u277c  [DINGBAT NEGATIVE CIRCLED DIGIT SEVEN]
-"\u277C" => "7"
-
-# \u2786  [DINGBAT CIRCLED SANS-SERIF DIGIT SEVEN]
-"\u2786" => "7"
-
-# \u2790  [DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT SEVEN]
-"\u2790" => "7"
-
-# \uff17  [FULLWIDTH DIGIT SEVEN]
-"\uFF17" => "7"
-
-# \u248e  [DIGIT SEVEN FULL STOP]
-"\u248E" => "7."
-
-# \u247a  [PARENTHESIZED DIGIT SEVEN]
-"\u247A" => "(7)"
-
-# \u2078  [SUPERSCRIPT EIGHT]
-"\u2078" => "8"
-
-# \u2088  [SUBSCRIPT EIGHT]
-"\u2088" => "8"
-
-# \u2467  [CIRCLED DIGIT EIGHT]
-"\u2467" => "8"
-
-# \u24fc  [DOUBLE CIRCLED DIGIT EIGHT]
-"\u24FC" => "8"
-
-# \u277d  [DINGBAT NEGATIVE CIRCLED DIGIT EIGHT]
-"\u277D" => "8"
-
-# \u2787  [DINGBAT CIRCLED SANS-SERIF DIGIT EIGHT]
-"\u2787" => "8"
-
-# \u2791  [DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT EIGHT]
-"\u2791" => "8"
-
-# \uff18  [FULLWIDTH DIGIT EIGHT]
-"\uFF18" => "8"
-
-# \u248f  [DIGIT EIGHT FULL STOP]
-"\u248F" => "8."
-
-# \u247b  [PARENTHESIZED DIGIT EIGHT]
-"\u247B" => "(8)"
-
-# \u2079  [SUPERSCRIPT NINE]
-"\u2079" => "9"
-
-# \u2089  [SUBSCRIPT NINE]
-"\u2089" => "9"
-
-# \u2468  [CIRCLED DIGIT NINE]
-"\u2468" => "9"
-
-# \u24fd  [DOUBLE CIRCLED DIGIT NINE]
-"\u24FD" => "9"
-
-# \u277e  [DINGBAT NEGATIVE CIRCLED DIGIT NINE]
-"\u277E" => "9"
-
-# \u2788  [DINGBAT CIRCLED SANS-SERIF DIGIT NINE]
-"\u2788" => "9"
-
-# \u2792  [DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT NINE]
-"\u2792" => "9"
-
-# \uff19  [FULLWIDTH DIGIT NINE]
-"\uFF19" => "9"
-
-# \u2490  [DIGIT NINE FULL STOP]
-"\u2490" => "9."
-
-# \u247c  [PARENTHESIZED DIGIT NINE]
-"\u247C" => "(9)"
-
-# \u2469  [CIRCLED NUMBER TEN]
-"\u2469" => "10"
-
-# \u24fe  [DOUBLE CIRCLED NUMBER TEN]
-"\u24FE" => "10"
-
-# \u277f  [DINGBAT NEGATIVE CIRCLED NUMBER TEN]
-"\u277F" => "10"
-
-# \u2789  [DINGBAT CIRCLED SANS-SERIF NUMBER TEN]
-"\u2789" => "10"
-
-# \u2793  [DINGBAT NEGATIVE CIRCLED SANS-SERIF NUMBER TEN]
-"\u2793" => "10"
-
-# \u2491  [NUMBER TEN FULL STOP]
-"\u2491" => "10."
-
-# \u247d  [PARENTHESIZED NUMBER TEN]
-"\u247D" => "(10)"
-
-# \u246a  [CIRCLED NUMBER ELEVEN]
-"\u246A" => "11"
-
-# \u24eb  [NEGATIVE CIRCLED NUMBER ELEVEN]
-"\u24EB" => "11"
-
-# \u2492  [NUMBER ELEVEN FULL STOP]
-"\u2492" => "11."
-
-# \u247e  [PARENTHESIZED NUMBER ELEVEN]
-"\u247E" => "(11)"
-
-# \u246b  [CIRCLED NUMBER TWELVE]
-"\u246B" => "12"
-
-# \u24ec  [NEGATIVE CIRCLED NUMBER TWELVE]
-"\u24EC" => "12"
-
-# \u2493  [NUMBER TWELVE FULL STOP]
-"\u2493" => "12."
-
-# \u247f  [PARENTHESIZED NUMBER TWELVE]
-"\u247F" => "(12)"
-
-# \u246c  [CIRCLED NUMBER THIRTEEN]
-"\u246C" => "13"
-
-# \u24ed  [NEGATIVE CIRCLED NUMBER THIRTEEN]
-"\u24ED" => "13"
-
-# \u2494  [NUMBER THIRTEEN FULL STOP]
-"\u2494" => "13."
-
-# \u2480  [PARENTHESIZED NUMBER THIRTEEN]
-"\u2480" => "(13)"
-
-# \u246d  [CIRCLED NUMBER FOURTEEN]
-"\u246D" => "14"
-
-# \u24ee  [NEGATIVE CIRCLED NUMBER FOURTEEN]
-"\u24EE" => "14"
-
-# \u2495  [NUMBER FOURTEEN FULL STOP]
-"\u2495" => "14."
-
-# \u2481  [PARENTHESIZED NUMBER FOURTEEN]
-"\u2481" => "(14)"
-
-# \u246e  [CIRCLED NUMBER FIFTEEN]
-"\u246E" => "15"
-
-# \u24ef  [NEGATIVE CIRCLED NUMBER FIFTEEN]
-"\u24EF" => "15"
-
-# \u2496  [NUMBER FIFTEEN FULL STOP]
-"\u2496" => "15."
-
-# \u2482  [PARENTHESIZED NUMBER FIFTEEN]
-"\u2482" => "(15)"
-
-# \u246f  [CIRCLED NUMBER SIXTEEN]
-"\u246F" => "16"
-
-# \u24f0  [NEGATIVE CIRCLED NUMBER SIXTEEN]
-"\u24F0" => "16"
-
-# \u2497  [NUMBER SIXTEEN FULL STOP]
-"\u2497" => "16."
-
-# \u2483  [PARENTHESIZED NUMBER SIXTEEN]
-"\u2483" => "(16)"
-
-# \u2470  [CIRCLED NUMBER SEVENTEEN]
-"\u2470" => "17"
-
-# \u24f1  [NEGATIVE CIRCLED NUMBER SEVENTEEN]
-"\u24F1" => "17"
-
-# \u2498  [NUMBER SEVENTEEN FULL STOP]
-"\u2498" => "17."
-
-# \u2484  [PARENTHESIZED NUMBER SEVENTEEN]
-"\u2484" => "(17)"
-
-# \u2471  [CIRCLED NUMBER EIGHTEEN]
-"\u2471" => "18"
-
-# \u24f2  [NEGATIVE CIRCLED NUMBER EIGHTEEN]
-"\u24F2" => "18"
-
-# \u2499  [NUMBER EIGHTEEN FULL STOP]
-"\u2499" => "18."
-
-# \u2485  [PARENTHESIZED NUMBER EIGHTEEN]
-"\u2485" => "(18)"
-
-# \u2472  [CIRCLED NUMBER NINETEEN]
-"\u2472" => "19"
-
-# \u24f3  [NEGATIVE CIRCLED NUMBER NINETEEN]
-"\u24F3" => "19"
-
-# \u249a  [NUMBER NINETEEN FULL STOP]
-"\u249A" => "19."
-
-# \u2486  [PARENTHESIZED NUMBER NINETEEN]
-"\u2486" => "(19)"
-
-# \u2473  [CIRCLED NUMBER TWENTY]
-"\u2473" => "20"
-
-# \u24f4  [NEGATIVE CIRCLED NUMBER TWENTY]
-"\u24F4" => "20"
-
-# \u249b  [NUMBER TWENTY FULL STOP]
-"\u249B" => "20."
-
-# \u2487  [PARENTHESIZED NUMBER TWENTY]
-"\u2487" => "(20)"
-
-# �  [LEFT-POINTING DOUBLE ANGLE QUOTATION MARK]
-"\u00AB" => "\""
-
-# �  [RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK]
-"\u00BB" => "\""
-
-# \u201c  [LEFT DOUBLE QUOTATION MARK]
-"\u201C" => "\""
-
-# \u201d  [RIGHT DOUBLE QUOTATION MARK]
-"\u201D" => "\""
-
-# \u201e  [DOUBLE LOW-9 QUOTATION MARK]
-"\u201E" => "\""
-
-# \u2033  [DOUBLE PRIME]
-"\u2033" => "\""
-
-# \u2036  [REVERSED DOUBLE PRIME]
-"\u2036" => "\""
-
-# \u275d  [HEAVY DOUBLE TURNED COMMA QUOTATION MARK ORNAMENT]
-"\u275D" => "\""
-
-# \u275e  [HEAVY DOUBLE COMMA QUOTATION MARK ORNAMENT]
-"\u275E" => "\""
-
-# \u276e  [HEAVY LEFT-POINTING ANGLE QUOTATION MARK ORNAMENT]
-"\u276E" => "\""
-
-# \u276f  [HEAVY RIGHT-POINTING ANGLE QUOTATION MARK ORNAMENT]
-"\u276F" => "\""
-
-# \uff02  [FULLWIDTH QUOTATION MARK]
-"\uFF02" => "\""
-
-# \u2018  [LEFT SINGLE QUOTATION MARK]
-"\u2018" => "\'"
-
-# \u2019  [RIGHT SINGLE QUOTATION MARK]
-"\u2019" => "\'"
-
-# \u201a  [SINGLE LOW-9 QUOTATION MARK]
-"\u201A" => "\'"
-
-# \u201b  [SINGLE HIGH-REVERSED-9 QUOTATION MARK]
-"\u201B" => "\'"
-
-# \u2032  [PRIME]
-"\u2032" => "\'"
-
-# \u2035  [REVERSED PRIME]
-"\u2035" => "\'"
-
-# \u2039  [SINGLE LEFT-POINTING ANGLE QUOTATION MARK]
-"\u2039" => "\'"
-
-# \u203a  [SINGLE RIGHT-POINTING ANGLE QUOTATION MARK]
-"\u203A" => "\'"
-
-# \u275b  [HEAVY SINGLE TURNED COMMA QUOTATION MARK ORNAMENT]
-"\u275B" => "\'"
-
-# \u275c  [HEAVY SINGLE COMMA QUOTATION MARK ORNAMENT]
-"\u275C" => "\'"
-
-# \uff07  [FULLWIDTH APOSTROPHE]
-"\uFF07" => "\'"
-
-# \u2010  [HYPHEN]
-"\u2010" => "-"
-
-# \u2011  [NON-BREAKING HYPHEN]
-"\u2011" => "-"
-
-# \u2012  [FIGURE DASH]
-"\u2012" => "-"
-
-# \u2013  [EN DASH]
-"\u2013" => "-"
-
-# \u2014  [EM DASH]
-"\u2014" => "-"
-
-# \u207b  [SUPERSCRIPT MINUS]
-"\u207B" => "-"
-
-# \u208b  [SUBSCRIPT MINUS]
-"\u208B" => "-"
-
-# \uff0d  [FULLWIDTH HYPHEN-MINUS]
-"\uFF0D" => "-"
-
-# \u2045  [LEFT SQUARE BRACKET WITH QUILL]
-"\u2045" => "["
-
-# \u2772  [LIGHT LEFT TORTOISE SHELL BRACKET ORNAMENT]
-"\u2772" => "["
-
-# \uff3b  [FULLWIDTH LEFT SQUARE BRACKET]
-"\uFF3B" => "["
-
-# \u2046  [RIGHT SQUARE BRACKET WITH QUILL]
-"\u2046" => "]"
-
-# \u2773  [LIGHT RIGHT TORTOISE SHELL BRACKET ORNAMENT]
-"\u2773" => "]"
-
-# \uff3d  [FULLWIDTH RIGHT SQUARE BRACKET]
-"\uFF3D" => "]"
-
-# \u207d  [SUPERSCRIPT LEFT PARENTHESIS]
-"\u207D" => "("
-
-# \u208d  [SUBSCRIPT LEFT PARENTHESIS]
-"\u208D" => "("
-
-# \u2768  [MEDIUM LEFT PARENTHESIS ORNAMENT]
-"\u2768" => "("
-
-# \u276a  [MEDIUM FLATTENED LEFT PARENTHESIS ORNAMENT]
-"\u276A" => "("
-
-# \uff08  [FULLWIDTH LEFT PARENTHESIS]
-"\uFF08" => "("
-
-# \u2e28  [LEFT DOUBLE PARENTHESIS]
-"\u2E28" => "(("
-
-# \u207e  [SUPERSCRIPT RIGHT PARENTHESIS]
-"\u207E" => ")"
-
-# \u208e  [SUBSCRIPT RIGHT PARENTHESIS]
-"\u208E" => ")"
-
-# \u2769  [MEDIUM RIGHT PARENTHESIS ORNAMENT]
-"\u2769" => ")"
-
-# \u276b  [MEDIUM FLATTENED RIGHT PARENTHESIS ORNAMENT]
-"\u276B" => ")"
-
-# \uff09  [FULLWIDTH RIGHT PARENTHESIS]
-"\uFF09" => ")"
-
-# \u2e29  [RIGHT DOUBLE PARENTHESIS]
-"\u2E29" => "))"
-
-# \u276c  [MEDIUM LEFT-POINTING ANGLE BRACKET ORNAMENT]
-"\u276C" => "<"
-
-# \u2770  [HEAVY LEFT-POINTING ANGLE BRACKET ORNAMENT]
-"\u2770" => "<"
-
-# \uff1c  [FULLWIDTH LESS-THAN SIGN]
-"\uFF1C" => "<"
-
-# \u276d  [MEDIUM RIGHT-POINTING ANGLE BRACKET ORNAMENT]
-"\u276D" => ">"
-
-# \u2771  [HEAVY RIGHT-POINTING ANGLE BRACKET ORNAMENT]
-"\u2771" => ">"
-
-# \uff1e  [FULLWIDTH GREATER-THAN SIGN]
-"\uFF1E" => ">"
-
-# \u2774  [MEDIUM LEFT CURLY BRACKET ORNAMENT]
-"\u2774" => "{"
-
-# \uff5b  [FULLWIDTH LEFT CURLY BRACKET]
-"\uFF5B" => "{"
-
-# \u2775  [MEDIUM RIGHT CURLY BRACKET ORNAMENT]
-"\u2775" => "}"
-
-# \uff5d  [FULLWIDTH RIGHT CURLY BRACKET]
-"\uFF5D" => "}"
-
-# \u207a  [SUPERSCRIPT PLUS SIGN]
-"\u207A" => "+"
-
-# \u208a  [SUBSCRIPT PLUS SIGN]
-"\u208A" => "+"
-
-# \uff0b  [FULLWIDTH PLUS SIGN]
-"\uFF0B" => "+"
-
-# \u207c  [SUPERSCRIPT EQUALS SIGN]
-"\u207C" => "="
-
-# \u208c  [SUBSCRIPT EQUALS SIGN]
-"\u208C" => "="
-
-# \uff1d  [FULLWIDTH EQUALS SIGN]
-"\uFF1D" => "="
-
-# \uff01  [FULLWIDTH EXCLAMATION MARK]
-"\uFF01" => "!"
-
-# \u203c  [DOUBLE EXCLAMATION MARK]
-"\u203C" => "!!"
-
-# \u2049  [EXCLAMATION QUESTION MARK]
-"\u2049" => "!?"
-
-# \uff03  [FULLWIDTH NUMBER SIGN]
-"\uFF03" => "#"
-
-# \uff04  [FULLWIDTH DOLLAR SIGN]
-"\uFF04" => "$"
-
-# \u2052  [COMMERCIAL MINUS SIGN]
-"\u2052" => "%"
-
-# \uff05  [FULLWIDTH PERCENT SIGN]
-"\uFF05" => "%"
-
-# \uff06  [FULLWIDTH AMPERSAND]
-"\uFF06" => "&"
-
-# \u204e  [LOW ASTERISK]
-"\u204E" => "*"
-
-# \uff0a  [FULLWIDTH ASTERISK]
-"\uFF0A" => "*"
-
-# \uff0c  [FULLWIDTH COMMA]
-"\uFF0C" => ","
-
-# \uff0e  [FULLWIDTH FULL STOP]
-"\uFF0E" => "."
-
-# \u2044  [FRACTION SLASH]
-"\u2044" => "/"
-
-# \uff0f  [FULLWIDTH SOLIDUS]
-"\uFF0F" => "/"
-
-# \uff1a  [FULLWIDTH COLON]
-"\uFF1A" => ":"
-
-# \u204f  [REVERSED SEMICOLON]
-"\u204F" => ";"
-
-# \uff1b  [FULLWIDTH SEMICOLON]
-"\uFF1B" => ";"
-
-# \uff1f  [FULLWIDTH QUESTION MARK]
-"\uFF1F" => "?"
-
-# \u2047  [DOUBLE QUESTION MARK]
-"\u2047" => "??"
-
-# \u2048  [QUESTION EXCLAMATION MARK]
-"\u2048" => "?!"
-
-# \uff20  [FULLWIDTH COMMERCIAL AT]
-"\uFF20" => "@"
-
-# \uff3c  [FULLWIDTH REVERSE SOLIDUS]
-"\uFF3C" => "\\"
-
-# \u2038  [CARET]
-"\u2038" => "^"
-
-# \uff3e  [FULLWIDTH CIRCUMFLEX ACCENT]
-"\uFF3E" => "^"
-
-# \uff3f  [FULLWIDTH LOW LINE]
-"\uFF3F" => "_"
-
-# \u2053  [SWUNG DASH]
-"\u2053" => "~"
-
-# \uff5e  [FULLWIDTH TILDE]
-"\uFF5E" => "~"
-
-################################################################
-# Below is the Perl script used to generate the above mappings #
-# from ASCIIFoldingFilter.java:                                #
-################################################################
-#
-# #!/usr/bin/perl
-#
-# use warnings;
-# use strict;
-# 
-# my @source_chars = ();
-# my @source_char_descriptions = ();
-# my $target = '';
-# 
-# while (<>) {
-#   if (/case\s+'(\\u[A-F0-9]+)':\s*\/\/\s*(.*)/i) {
-#     push @source_chars, $1;
-#	  push @source_char_descriptions, $2;
-#	  next;
-#   }
-#   if (/output\[[^\]]+\]\s*=\s*'(\\'|\\\\|.)'/) {
-#     $target .= $1;
-#     next;
-#   }
-#   if (/break;/) {
-#     $target = "\\\"" if ($target eq '"');
-#     for my $source_char_num (0..$#source_chars) {
-#	    print "# $source_char_descriptions[$source_char_num]\n";
-#	    print "\"$source_chars[$source_char_num]\" => \"$target\"\n\n";
-#	  }
-#	  @source_chars = ();
-#	  @source_char_descriptions = ();
-#	  $target = '';
-#   }
-# }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/mapping-ISOLatin1Accent.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/mapping-ISOLatin1Accent.txt b/solr/example/example-DIH/solr/rss/conf/mapping-ISOLatin1Accent.txt
deleted file mode 100644
index ede7742..0000000
--- a/solr/example/example-DIH/solr/rss/conf/mapping-ISOLatin1Accent.txt
+++ /dev/null
@@ -1,246 +0,0 @@
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Syntax:
-#   "source" => "target"
-#     "source".length() > 0 (source cannot be empty.)
-#     "target".length() >= 0 (target can be empty.)
-
-# example:
-#   "�" => "A"
-#   "\u00C0" => "A"
-#   "\u00C0" => "\u0041"
-#   "�" => "ss"
-#   "\t" => " "
-#   "\n" => ""
-
-# � => A
-"\u00C0" => "A"
-
-# � => A
-"\u00C1" => "A"
-
-# � => A
-"\u00C2" => "A"
-
-# � => A
-"\u00C3" => "A"
-
-# � => A
-"\u00C4" => "A"
-
-# � => A
-"\u00C5" => "A"
-
-# � => AE
-"\u00C6" => "AE"
-
-# � => C
-"\u00C7" => "C"
-
-# � => E
-"\u00C8" => "E"
-
-# � => E
-"\u00C9" => "E"
-
-# � => E
-"\u00CA" => "E"
-
-# � => E
-"\u00CB" => "E"
-
-# � => I
-"\u00CC" => "I"
-
-# � => I
-"\u00CD" => "I"
-
-# � => I
-"\u00CE" => "I"
-
-# � => I
-"\u00CF" => "I"
-
-# \u0132 => IJ
-"\u0132" => "IJ"
-
-# � => D
-"\u00D0" => "D"
-
-# � => N
-"\u00D1" => "N"
-
-# � => O
-"\u00D2" => "O"
-
-# � => O
-"\u00D3" => "O"
-
-# � => O
-"\u00D4" => "O"
-
-# � => O
-"\u00D5" => "O"
-
-# � => O
-"\u00D6" => "O"
-
-# � => O
-"\u00D8" => "O"
-
-# \u0152 => OE
-"\u0152" => "OE"
-
-# �
-"\u00DE" => "TH"
-
-# � => U
-"\u00D9" => "U"
-
-# � => U
-"\u00DA" => "U"
-
-# � => U
-"\u00DB" => "U"
-
-# � => U
-"\u00DC" => "U"
-
-# � => Y
-"\u00DD" => "Y"
-
-# \u0178 => Y
-"\u0178" => "Y"
-
-# � => a
-"\u00E0" => "a"
-
-# � => a
-"\u00E1" => "a"
-
-# � => a
-"\u00E2" => "a"
-
-# � => a
-"\u00E3" => "a"
-
-# � => a
-"\u00E4" => "a"
-
-# � => a
-"\u00E5" => "a"
-
-# � => ae
-"\u00E6" => "ae"
-
-# � => c
-"\u00E7" => "c"
-
-# � => e
-"\u00E8" => "e"
-
-# � => e
-"\u00E9" => "e"
-
-# � => e
-"\u00EA" => "e"
-
-# � => e
-"\u00EB" => "e"
-
-# � => i
-"\u00EC" => "i"
-
-# � => i
-"\u00ED" => "i"
-
-# � => i
-"\u00EE" => "i"
-
-# � => i
-"\u00EF" => "i"
-
-# \u0133 => ij
-"\u0133" => "ij"
-
-# � => d
-"\u00F0" => "d"
-
-# � => n
-"\u00F1" => "n"
-
-# � => o
-"\u00F2" => "o"
-
-# � => o
-"\u00F3" => "o"
-
-# � => o
-"\u00F4" => "o"
-
-# � => o
-"\u00F5" => "o"
-
-# � => o
-"\u00F6" => "o"
-
-# � => o
-"\u00F8" => "o"
-
-# \u0153 => oe
-"\u0153" => "oe"
-
-# � => ss
-"\u00DF" => "ss"
-
-# � => th
-"\u00FE" => "th"
-
-# � => u
-"\u00F9" => "u"
-
-# � => u
-"\u00FA" => "u"
-
-# � => u
-"\u00FB" => "u"
-
-# � => u
-"\u00FC" => "u"
-
-# � => y
-"\u00FD" => "y"
-
-# � => y
-"\u00FF" => "y"
-
-# \ufb00 => ff
-"\uFB00" => "ff"
-
-# \ufb01 => fi
-"\uFB01" => "fi"
-
-# \ufb02 => fl
-"\uFB02" => "fl"
-
-# \ufb03 => ffi
-"\uFB03" => "ffi"
-
-# \ufb04 => ffl
-"\uFB04" => "ffl"
-
-# \ufb05 => ft
-"\uFB05" => "ft"
-
-# \ufb06 => st
-"\uFB06" => "st"

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/protwords.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/protwords.txt b/solr/example/example-DIH/solr/rss/conf/protwords.txt
deleted file mode 100644
index 1dfc0ab..0000000
--- a/solr/example/example-DIH/solr/rss/conf/protwords.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#-----------------------------------------------------------------------
-# Use a protected word file to protect against the stemmer reducing two
-# unrelated words to the same base word.
-
-# Some non-words that normally won't be encountered,
-# just to test that they won't be stemmed.
-dontstems
-zwhacky
-

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/rss-data-config.xml
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/rss-data-config.xml b/solr/example/example-DIH/solr/rss/conf/rss-data-config.xml
deleted file mode 100644
index 704325b..0000000
--- a/solr/example/example-DIH/solr/rss/conf/rss-data-config.xml
+++ /dev/null
@@ -1,26 +0,0 @@
-<dataConfig>
-    <dataSource type="URLDataSource" />
-    <document>
-        <entity name="slashdot"
-                pk="link"
-                url="http://rss.slashdot.org/Slashdot/slashdot"
-                processor="XPathEntityProcessor"
-                forEach="/rss/channel/item"
-                transformer="DateFormatTransformer">
-        
-            <field column="source" xpath="/rss/channel/title" commonField="true" />
-            <field column="source-link" xpath="/rss/channel/link" commonField="true" />
-            <field column="subject" xpath="/rss/channel/subject" commonField="true" />
-      
-            <field column="title" xpath="/rss/channel/item/title" />
-            <field column="link" xpath="/rss/channel/item/link" />
-            <field column="description" xpath="/rss/channel/item/description" />
-            <field column="creator" xpath="/rss/channel/item/creator" />
-            <field column="item-subject" xpath="/rss/channel/item/subject" />
-            <field column="date" xpath="/rss/channel/item/date" dateTimeFormat="yyyy-MM-dd'T'HH:mm:ss" />
-            <field column="slash-department" xpath="/rss/channel/item/department" />
-            <field column="slash-section" xpath="/rss/channel/item/section" />
-            <field column="slash-comments" xpath="/rss/channel/item/comments" />
-        </entity>
-    </document>
-</dataConfig>


[16/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-7452: change terminology from _m missing-bucket to _p partial-bucket for refinement

Posted by ab...@apache.org.
SOLR-7452: change terminology from _m missing-bucket to _p partial-bucket for refinement


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/66bfdcbd
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/66bfdcbd
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/66bfdcbd

Branch: refs/heads/jira/solr-9959
Commit: 66bfdcbdbab8f294341232946a30a61898228a34
Parents: 0a689f4
Author: yonik <yo...@apache.org>
Authored: Tue Mar 28 19:52:51 2017 -0400
Committer: yonik <yo...@apache.org>
Committed: Tue Mar 28 19:52:51 2017 -0400

----------------------------------------------------------------------
 .../solr/search/facet/FacetFieldProcessor.java      |  6 +++---
 .../solr/search/facet/FacetRequestSortedMerger.java | 16 ++++++++--------
 .../solr/search/facet/TestJsonFacetRefinement.java  | 11 +++++------
 3 files changed, 16 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66bfdcbd/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessor.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessor.java b/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessor.java
index 97d8607..a29e78d 100644
--- a/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessor.java
+++ b/solr/core/src/java/org/apache/solr/search/facet/FacetFieldProcessor.java
@@ -530,13 +530,13 @@ abstract class FacetFieldProcessor extends FacetProcessor<FacetField> {
   protected SimpleOrderedMap<Object> refineFacets() throws IOException {
     List leaves = asList(fcontext.facetInfo.get("_l"));        // We have not seen this bucket: do full faceting for this bucket, including all sub-facets
     List<List> skip = asList(fcontext.facetInfo.get("_s"));    // We have seen this bucket, so skip stats on it, and skip sub-facets except for the specified sub-facets that should calculate specified buckets.
-    List<List> missing = asList(fcontext.facetInfo.get("_m")); // We have not seen this bucket, do full faceting for this bucket, and most sub-facets... but some sub-facets should only visit specified buckets.
+    List<List> partial = asList(fcontext.facetInfo.get("_p")); // We have not seen this bucket, do full faceting for this bucket, and most sub-facets... but some sub-facets are partial and should only visit specified buckets.
 
     // For leaf refinements, we do full faceting for each leaf bucket.  Any sub-facets of these buckets will be fully evaluated.  Because of this, we should never
     // encounter leaf refinements that have sub-facets that return partial results.
 
     SimpleOrderedMap<Object> res = new SimpleOrderedMap<>();
-    List<SimpleOrderedMap> bucketList = new ArrayList<>( leaves.size() + skip.size() + missing.size() );
+    List<SimpleOrderedMap> bucketList = new ArrayList<>( leaves.size() + skip.size() + partial.size() );
     res.add("buckets", bucketList);
 
     // TODO: an alternate implementations can fill all accs at once
@@ -555,7 +555,7 @@ abstract class FacetFieldProcessor extends FacetProcessor<FacetField> {
     }
 
     // The only difference between skip and missing is the value of "skip" passed to refineBucket
-    for (List bucketAndFacetInfo : missing) {
+    for (List bucketAndFacetInfo : partial) {
       assert bucketAndFacetInfo.size() == 2;
       Object bucketVal = bucketAndFacetInfo.get(0);
       Map<String,Object> facetInfo = (Map<String, Object>) bucketAndFacetInfo.get(1);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66bfdcbd/solr/core/src/java/org/apache/solr/search/facet/FacetRequestSortedMerger.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/facet/FacetRequestSortedMerger.java b/solr/core/src/java/org/apache/solr/search/facet/FacetRequestSortedMerger.java
index f55fc0f..e05064c 100644
--- a/solr/core/src/java/org/apache/solr/search/facet/FacetRequestSortedMerger.java
+++ b/solr/core/src/java/org/apache/solr/search/facet/FacetRequestSortedMerger.java
@@ -161,7 +161,7 @@ abstract class FacetRequestSortedMerger<FacetRequestT extends FacetRequestSorted
     boolean thisMissing = mcontext.bucketWasMissing(); // Was this whole facet missing (i.e. inside a bucket that was missing)?
 
     // TODO: add information in sub-shard response about dropped buckets (i.e. not all returned due to limit)
-    // If we know we've seen all the buckets from a shard, then we don't have to add to leafBuckets or missingBuckets, only skipBuckets
+    // If we know we've seen all the buckets from a shard, then we don't have to add to leafBuckets or partialBuckets, only skipBuckets
     boolean isCommandPartial = freq.returnsPartial();
     boolean returnedAllBuckets = !isCommandPartial && !thisMissing;  // did the shard return all of the possible buckets?
 
@@ -189,7 +189,7 @@ abstract class FacetRequestSortedMerger<FacetRequestT extends FacetRequestSorted
     }
 
     ArrayList<Object> leafBuckets = null;    // "_l" missing buckets specified by bucket value only (no need to specify anything further)
-    ArrayList<Object> missingBuckets = null; // "_m" missing buckets that need to specify values for partial facets.. each entry is [bucketval, subs]
+    ArrayList<Object> partialBuckets = null; // "_p" missing buckets that have a partial sub-facet that need to specify those bucket values... each entry is [bucketval, subs]
     ArrayList<Object> skipBuckets = null;    // "_s" present buckets that we need to recurse into because children facets have refinement requirements. each entry is [bucketval, subs]
 
     for (FacetBucket bucket : bucketList) {
@@ -208,12 +208,12 @@ abstract class FacetRequestSortedMerger<FacetRequestT extends FacetRequestSorted
           mcontext.setBucketWasMissing(prev);
 
           if (bucketRefinement != null) {
-            if (missingBuckets==null) missingBuckets = new ArrayList<>();
-            missingBuckets.add( Arrays.asList(bucket.bucketValue, bucketRefinement) );
+            if (partialBuckets==null) partialBuckets = new ArrayList<>();
+            partialBuckets.add( Arrays.asList(bucket.bucketValue, bucketRefinement) );
           }
         }
 
-        // if we didn't add to "_m" (missing), then we should add to "_l" (leaf missing)
+        // if we didn't add to "_p" (missing with partial sub-facets), then we should add to "_l" (missing leaf)
         if (bucketRefinement == null) {
           if (leafBuckets == null) leafBuckets = new ArrayList<>();
           leafBuckets.add(bucket.bucketValue);
@@ -231,12 +231,12 @@ abstract class FacetRequestSortedMerger<FacetRequestT extends FacetRequestSorted
     }
 
     // TODO: what if we don't need to refine any variable buckets, but we do need to contribute to numBuckets, missing, allBuckets, etc...
-    // because we were "missing".  That will be handled at a higher level (i.e. we'll be in someone's missing bucket?)
+    // because we were "partial".  That will be handled at a higher level (i.e. we'll be in someone's missing bucket?)
     // TODO: test with a sub-facet with a limit of 0 and something like a missing bucket
-    if (leafBuckets != null || missingBuckets != null || skipBuckets != null) {
+    if (leafBuckets != null || partialBuckets != null || skipBuckets != null) {
       refinement = new HashMap<>(3);
       if (leafBuckets != null) refinement.put("_l",leafBuckets);
-      if (missingBuckets != null) refinement.put("_m", missingBuckets);
+      if (partialBuckets != null) refinement.put("_p", partialBuckets);
       if (skipBuckets != null) refinement.put("_s", skipBuckets);
     }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/66bfdcbd/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacetRefinement.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacetRefinement.java b/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacetRefinement.java
index 5c86347..b4b0220 100644
--- a/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacetRefinement.java
+++ b/solr/core/src/test/org/apache/solr/search/facet/TestJsonFacetRefinement.java
@@ -36,7 +36,6 @@ import org.noggit.ObjectBuilder;
 public class TestJsonFacetRefinement extends SolrTestCaseHS {
 
   private static SolrInstances servers;  // for distributed testing
-  private static int origTableSize;
 
   @BeforeClass
   public static void beforeTests() throws Exception {
@@ -209,13 +208,13 @@ public class TestJsonFacetRefinement extends SolrTestCaseHS {
             "}"
     );
 
-    // for testing missing _m, we need a partial facet within a partial facet
+    // for testing partial _p, we need a partial facet within a partial facet
     doTestRefine("{top:{type:terms, field:Afield, refine:true, limit:1, facet:{x : {type:terms, field:X, limit:1, refine:true} } } }",
         "{top: {buckets:[{val:'A', count:2, x:{buckets:[{val:x1, count:5},{val:x2, count:3}]} } ] } }",
         "{top: {buckets:[{val:'B', count:1, x:{buckets:[{val:x2, count:4},{val:x3, count:2}]} } ] } }",
         null,
         "=={top: {" +
-            "_m:[  ['A' , {x:{_l:[x1]}} ]  ]" +
+            "_p:[  ['A' , {x:{_l:[x1]}} ]  ]" +
             "    }  " +
             "}"
     );
@@ -329,7 +328,7 @@ public class TestJsonFacetRefinement extends SolrTestCaseHS {
             "}"
     );
 
-    // test that sibling facets and stats are included for _m buckets, but skipped for _s buckets
+    // test that sibling facets and stats are included for _p buckets, but skipped for _s buckets
     client.testJQ(params(p, "q", "*:*",
         "json.facet", "{" +
             " ab :{type:terms, field:${cat_s}, limit:1, overrequest:0, refine:true,  facet:{  xy:{type:terms, field:${xy_s}, limit:1, overrequest:0, refine:true}, qq:{query:'*:*'},ww:'sum(${num_d})'  }}" +
@@ -339,9 +338,9 @@ public class TestJsonFacetRefinement extends SolrTestCaseHS {
             "}"
         )
         , "facets=={ count:8" +
-            ", ab:{ buckets:[  {val:A, count:4, xy:{buckets:[ {val:X,count:3}]}    ,qq:{count:4}, ww:4.0 }]  }" +  // make sure qq and ww are included for _m buckets
+            ", ab:{ buckets:[  {val:A, count:4, xy:{buckets:[ {val:X,count:3}]}    ,qq:{count:4}, ww:4.0 }]  }" +  // make sure qq and ww are included for _p buckets
             ", allf:{ buckets:[ {count:8, val:all, cat:{buckets:[{val:A,count:4}]} ,qq:{count:8}, ww:2.0 }]  }" +  // make sure qq and ww are excluded (not calculated again in another phase) for _s buckets
-            ", ab2:{ buckets:[  {val:A, count:4, xy:{buckets:[ {val:X,count:3}]}    ,qq:{count:4}, ww:4.0 }]  }" +  // make sure qq and ww are included for _m buckets
+            ", ab2:{ buckets:[  {val:A, count:4, xy:{buckets:[ {val:X,count:3}]}    ,qq:{count:4}, ww:4.0 }]  }" +  // make sure qq and ww are included for _p buckets
             ", allf2:{ buckets:[ {count:8, val:all, cat:{buckets:[{val:A,count:4}]} ,qq:{count:8}, ww:2.0 }]  }" +  // make sure qq and ww are excluded (not calculated again in another phase) for _s buckets
             "}"
     );


[06/52] [abbrv] lucene-solr:jira/solr-9959: LUCENE-7743: Avoid calling new String(String).

Posted by ab...@apache.org.
LUCENE-7743: Avoid calling new String(String).


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/390ef9af
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/390ef9af
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/390ef9af

Branch: refs/heads/jira/solr-9959
Commit: 390ef9af9fa081c90370f69b001cf7ec83c8f1bb
Parents: c189139
Author: Adrien Grand <jp...@gmail.com>
Authored: Tue Mar 28 15:25:16 2017 +0200
Committer: Adrien Grand <jp...@gmail.com>
Committed: Tue Mar 28 15:35:04 2017 +0200

----------------------------------------------------------------------
 lucene/CHANGES.txt                                           | 3 +++
 .../lucene/analysis/compound/hyphenation/TernaryTree.java    | 2 +-
 .../java/org/apache/lucene/analysis/cn/smart/Utility.java    | 8 ++++----
 .../src/test/org/apache/lucene/index/TestIndexWriter.java    | 2 +-
 .../test/org/apache/lucene/util/TestRamUsageEstimator.java   | 8 ++++----
 .../solr/handler/dataimport/AbstractDIHCacheTestCase.java    | 2 +-
 .../org/apache/solr/highlight/DefaultSolrHighlighter.java    | 3 +--
 solr/core/src/test/org/apache/solr/cloud/ZkCLITest.java      | 4 ++--
 .../test/org/apache/solr/response/TestSolrQueryResponse.java | 2 +-
 .../solr/schema/TrieIntPrefixActsAsRangeQueryFieldType.java  | 2 +-
 10 files changed, 19 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/390ef9af/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index a8f24c7..92f01a9 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -90,6 +90,9 @@ Other
 * LUCENE-7751: Avoid boxing primitives only to call compareTo.
   (Daniel Jelinski via Adrien Grand)
 
+* LUCENE-7743: Never call new String(String).
+  (Daniel Jelinski via Adrien Grand)
+
 ======================= Lucene 6.5.0 =======================
 
 API Changes

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/390ef9af/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/TernaryTree.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/TernaryTree.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/TernaryTree.java
index 6aeb69b..a331d2a 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/TernaryTree.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/TernaryTree.java
@@ -516,7 +516,7 @@ public class TernaryTree implements Cloneable {
 
     @Override
     public String nextElement() {
-      String res = new String(curkey);
+      String res = curkey;
       cur = up();
       run();
       return res;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/390ef9af/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/Utility.java
----------------------------------------------------------------------
diff --git a/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/Utility.java b/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/Utility.java
index aca80e7..81ca52e 100644
--- a/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/Utility.java
+++ b/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/Utility.java
@@ -24,16 +24,16 @@ import org.apache.lucene.analysis.cn.smart.hhmm.SegTokenFilter; // for javadoc
  */
 public class Utility {
 
-  public static final char[] STRING_CHAR_ARRAY = new String("\u672a##\u4e32")
+  public static final char[] STRING_CHAR_ARRAY = "\u672a##\u4e32"
       .toCharArray();
 
-  public static final char[] NUMBER_CHAR_ARRAY = new String("\u672a##\u6570")
+  public static final char[] NUMBER_CHAR_ARRAY = "\u672a##\u6570"
       .toCharArray();
 
-  public static final char[] START_CHAR_ARRAY = new String("\u59cb##\u59cb")
+  public static final char[] START_CHAR_ARRAY = "\u59cb##\u59cb"
       .toCharArray();
 
-  public static final char[] END_CHAR_ARRAY = new String("\u672b##\u672b").toCharArray();
+  public static final char[] END_CHAR_ARRAY = "\u672b##\u672b".toCharArray();
 
   /**
    * Delimiters will be filtered to this character by {@link SegTokenFilter}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/390ef9af/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
index d153ac3..660280b 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
@@ -1154,7 +1154,7 @@ public class TestIndexWriter extends LuceneTestCase {
     t.finish = true;
     t.join();
     if (t.failed) {
-      fail(new String(t.bytesLog.toString("UTF-8")));
+      fail(t.bytesLog.toString("UTF-8"));
     }
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/390ef9af/lucene/core/src/test/org/apache/lucene/util/TestRamUsageEstimator.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/util/TestRamUsageEstimator.java b/lucene/core/src/test/org/apache/lucene/util/TestRamUsageEstimator.java
index cf53c2d..34128ad 100644
--- a/lucene/core/src/test/org/apache/lucene/util/TestRamUsageEstimator.java
+++ b/lucene/core/src/test/org/apache/lucene/util/TestRamUsageEstimator.java
@@ -24,7 +24,7 @@ import java.util.Random;
 
 public class TestRamUsageEstimator extends LuceneTestCase {
   public void testSanity() {
-    assertTrue(sizeOf(new String("test string")) > shallowSizeOfInstance(String.class));
+    assertTrue(sizeOf("test string") > shallowSizeOfInstance(String.class));
 
     Holder holder = new Holder();
     holder.holder = new Holder("string2", 5000L);
@@ -37,9 +37,9 @@ public class TestRamUsageEstimator extends LuceneTestCase {
         shallowSizeOfInstance(Holder.class)         == shallowSizeOfInstance(HolderSubclass2.class));
 
     String[] strings = new String[] {
-        new String("test string"),
-        new String("hollow"), 
-        new String("catchmaster")
+        "test string",
+        "hollow",
+        "catchmaster"
     };
     assertTrue(sizeOf(strings) > shallowSizeOf(strings));
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/390ef9af/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/AbstractDIHCacheTestCase.java
----------------------------------------------------------------------
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/AbstractDIHCacheTestCase.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/AbstractDIHCacheTestCase.java
index 2ef5a91..8c7109d 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/AbstractDIHCacheTestCase.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/AbstractDIHCacheTestCase.java
@@ -47,7 +47,7 @@ public class AbstractDIHCacheTestCase {
   @Before
   public void setup() {
     try {
-      APPLE = new SerialClob(new String("Apples grow on trees and they are good to eat.").toCharArray());
+      APPLE = new SerialClob("Apples grow on trees and they are good to eat.".toCharArray());
     } catch (SQLException sqe) {
       Assert.fail("Could not Set up Test");
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/390ef9af/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java b/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java
index 098e1f7..24304d0a 100644
--- a/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java
+++ b/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java
@@ -733,9 +733,8 @@ public class DefaultSolrHighlighter extends SolrHighlighter implements PluginInf
       if( alternateFieldLen <= 0 ){
         altList.add(encoder.encodeText(altText));
       } else{
-        //note: seemingly redundant new String(...) releases memory to the larger text. But is copying better?
         altList.add( len + altText.length() > alternateFieldLen ?
-            encoder.encodeText(new String(altText.substring( 0, alternateFieldLen - len ))) :
+            encoder.encodeText(altText.substring(0, alternateFieldLen - len)) :
             encoder.encodeText(altText) );
         len += altText.length();
         if( len >= alternateFieldLen ) break;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/390ef9af/solr/core/src/test/org/apache/solr/cloud/ZkCLITest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/ZkCLITest.java b/solr/core/src/test/org/apache/solr/cloud/ZkCLITest.java
index 8b29d48..dc999f1 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ZkCLITest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ZkCLITest.java
@@ -272,7 +272,7 @@ public class ZkCLITest extends SolrTestCaseJ4 {
   @Test
   public void testGet() throws Exception {
     String getNode = "/getNode";
-    byte [] data = new String("getNode-data").getBytes(StandardCharsets.UTF_8);
+    byte [] data = "getNode-data".getBytes(StandardCharsets.UTF_8);
     this.zkClient.create(getNode, data, CreateMode.PERSISTENT, true);
     String[] args = new String[] {"-zkhost", zkServer.getZkAddress(), "-cmd",
         "get", getNode};
@@ -284,7 +284,7 @@ public class ZkCLITest extends SolrTestCaseJ4 {
     File tmpDir = createTempDir().toFile();
     
     String getNode = "/getFileNode";
-    byte [] data = new String("getFileNode-data").getBytes(StandardCharsets.UTF_8);
+    byte [] data = "getFileNode-data".getBytes(StandardCharsets.UTF_8);
     this.zkClient.create(getNode, data, CreateMode.PERSISTENT, true);
 
     File file = new File(tmpDir,

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/390ef9af/solr/core/src/test/org/apache/solr/response/TestSolrQueryResponse.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/response/TestSolrQueryResponse.java b/solr/core/src/test/org/apache/solr/response/TestSolrQueryResponse.java
index 8b17dc6..046582f 100644
--- a/solr/core/src/test/org/apache/solr/response/TestSolrQueryResponse.java
+++ b/solr/core/src/test/org/apache/solr/response/TestSolrQueryResponse.java
@@ -79,7 +79,7 @@ public class TestSolrQueryResponse extends LuceneTestCase {
     final SolrQueryResponse response = new SolrQueryResponse();
     assertEquals("response initial value", null, response.getResponse());
     final Object newValue = (random().nextBoolean()
-        ? (random().nextBoolean() ? new String("answer") : new Integer(42)) : null);
+        ? (random().nextBoolean() ? "answer" : Integer.valueOf(42)) : null);
     response.addResponse(newValue);
     assertEquals("response new value", newValue, response.getResponse());
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/390ef9af/solr/core/src/test/org/apache/solr/schema/TrieIntPrefixActsAsRangeQueryFieldType.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/schema/TrieIntPrefixActsAsRangeQueryFieldType.java b/solr/core/src/test/org/apache/solr/schema/TrieIntPrefixActsAsRangeQueryFieldType.java
index a58c268..4b79975 100644
--- a/solr/core/src/test/org/apache/solr/schema/TrieIntPrefixActsAsRangeQueryFieldType.java
+++ b/solr/core/src/test/org/apache/solr/schema/TrieIntPrefixActsAsRangeQueryFieldType.java
@@ -26,7 +26,7 @@ import org.apache.solr.search.QParser;
 public class TrieIntPrefixActsAsRangeQueryFieldType extends TrieIntField {
 
   public Query getPrefixQuery(QParser parser, SchemaField sf, String termStr) {
-    return getRangeQuery(parser, sf, termStr, new String(Integer.MAX_VALUE + ""), true, false);
+    return getRangeQuery(parser, sf, termStr, Integer.MAX_VALUE + "", true, false);
   }
 
 }


[05/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-10369: bin\solr.cmd delete and healthcheck now work again (fixed continuation chars ^)

Posted by ab...@apache.org.
SOLR-10369: bin\solr.cmd delete and healthcheck now work again (fixed continuation chars ^)


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/68eb078a
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/68eb078a
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/68eb078a

Branch: refs/heads/jira/solr-9959
Commit: 68eb078af854fbb923dee4a93541c7ff0fd0b982
Parents: b46b827
Author: Jan Høydahl <ja...@apache.org>
Authored: Tue Mar 28 14:24:09 2017 +0200
Committer: Jan Høydahl <ja...@apache.org>
Committed: Tue Mar 28 14:24:09 2017 +0200

----------------------------------------------------------------------
 solr/CHANGES.txt  | 2 ++
 solr/bin/solr.cmd | 4 ++--
 2 files changed, 4 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/68eb078a/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 3403c90..b68c62f 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -139,6 +139,8 @@ Bug Fixes
 
 * SOLR-10362: "Memory Pool not found" error when reporting JVM metrics. (ab)
 
+* SOLR-10369: bin\solr.cmd delete and healthcheck now works again; fixed continuation chars ^ (Luis Goes via janhoy)
+
 Other Changes
 ----------------------
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/68eb078a/solr/bin/solr.cmd
----------------------------------------------------------------------
diff --git a/solr/bin/solr.cmd b/solr/bin/solr.cmd
index ee76a21..2aa4f00 100644
--- a/solr/bin/solr.cmd
+++ b/solr/bin/solr.cmd
@@ -1234,7 +1234,7 @@ goto parse_healthcheck_args
 :run_healthcheck
 IF NOT DEFINED HEALTHCHECK_COLLECTION goto healthcheck_usage
 IF NOT DEFINED HEALTHCHECK_ZK_HOST set "HEALTHCHECK_ZK_HOST=localhost:9983"
-"%JAVA%" %SOLR_SSL_OPTS% %AUTHC_OPTS% %SOLR_ZK_CREDS_AND_ACLS% -Dsolr.install.dir="%SOLR_TIP%" ^ 
+"%JAVA%" %SOLR_SSL_OPTS% %AUTHC_OPTS% %SOLR_ZK_CREDS_AND_ACLS% -Dsolr.install.dir="%SOLR_TIP%" ^
   -Dlog4j.configuration="file:%DEFAULT_SERVER_DIR%\scripts\cloud-scripts\log4j.properties" ^
   -classpath "%DEFAULT_SERVER_DIR%\solr-webapp\webapp\WEB-INF\lib\*;%DEFAULT_SERVER_DIR%\lib\ext\*" ^
   org.apache.solr.util.SolrCLI healthcheck -collection !HEALTHCHECK_COLLECTION! -zkHost !HEALTHCHECK_ZK_HOST!
@@ -1428,7 +1428,7 @@ if "!DELETE_CONFIG!"=="" (
   set DELETE_CONFIG=true
 )
 
-"%JAVA%" %SOLR_SSL_OPTS% %AUTHC_OPTS% %SOLR_ZK_CREDS_AND_ACLS% -Dsolr.install.dir="%SOLR_TIP%" ^ 
+"%JAVA%" %SOLR_SSL_OPTS% %AUTHC_OPTS% %SOLR_ZK_CREDS_AND_ACLS% -Dsolr.install.dir="%SOLR_TIP%" ^
 -Dlog4j.configuration="file:%DEFAULT_SERVER_DIR%\scripts\cloud-scripts\log4j.properties" ^
 -classpath "%DEFAULT_SERVER_DIR%\solr-webapp\webapp\WEB-INF\lib\*;%DEFAULT_SERVER_DIR%\lib\ext\*" ^
 org.apache.solr.util.SolrCLI delete -name !DELETE_NAME! -deleteConfig !DELETE_CONFIG! ^


[48/52] [abbrv] lucene-solr:jira/solr-9959: Merge branch 'master' into jira/solr-9959

Posted by ab...@apache.org.
Merge branch 'master' into jira/solr-9959


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/04a71229
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/04a71229
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/04a71229

Branch: refs/heads/jira/solr-9959
Commit: 04a71229303c0b6e19c56b556a7b46387325a43f
Parents: 5be37ef 99af830
Author: Andrzej Bialecki <ab...@apache.org>
Authored: Mon Apr 3 12:56:34 2017 +0200
Committer: Andrzej Bialecki <ab...@apache.org>
Committed: Mon Apr 3 12:56:34 2017 +0200

----------------------------------------------------------------------
 .gitignore                                      |    1 -
 dev-tools/doap/lucene.rdf                       |    7 +
 dev-tools/doap/solr.rdf                         |    7 +
 dev-tools/idea/.idea/ant.xml                    |    3 -
 .../libraries/Solr_morphlines_cell_library.xml  |   10 -
 .../libraries/Solr_morphlines_core_library.xml  |   10 -
 .../Solr_morphlines_core_test_library.xml       |   10 -
 dev-tools/idea/.idea/modules.xml                |    3 -
 dev-tools/idea/.idea/workspace.xml              |   39 +-
 .../idea/solr/contrib/map-reduce/map-reduce.iml |   43 -
 .../contrib/morphlines-cell/morphlines-cell.iml |   29 -
 .../contrib/morphlines-core/morphlines-core.iml |   28 -
 dev-tools/maven/pom.xml.template                |    1 +
 .../solr/contrib/map-reduce/pom.xml.template    |   90 -
 .../contrib/morphlines-cell/pom.xml.template    |   90 -
 .../contrib/morphlines-core/pom.xml.template    |   91 -
 dev-tools/maven/solr/contrib/pom.xml.template   |    3 -
 lucene/CHANGES.txt                              |   30 +-
 .../charfilter/HTMLStripCharFilter.java         |    2 +-
 .../charfilter/HTMLStripCharFilter.jflex        |    7 +-
 .../compound/hyphenation/TernaryTree.java       |    2 +-
 .../lucene/analysis/shingle/ShingleFilter.java  |    2 +-
 .../analysis/hunspell/TestDictionary.java       |    2 +-
 .../miscellaneous/TestWordDelimiterFilter.java  |    2 +-
 .../TestWordDelimiterGraphFilter.java           |    2 +-
 .../payloads/NumericPayloadTokenFilterTest.java |    2 +-
 .../payloads/TypeAsPayloadTokenFilterTest.java  |    2 +-
 .../analysis/sinks/TestTeeSinkTokenFilter.java  |    4 +-
 .../lucene/analysis/snowball/TestSnowball.java  |    2 +-
 .../standard/TestUAX29URLEmailTokenizer.java    |    4 +-
 .../lucene/analysis/icu/ICUTransformFilter.java |    2 +-
 .../lucene/analysis/cn/smart/Utility.java       |    8 +-
 .../index/TestBackwardsCompatibility.java       |    4 +-
 .../org/apache/lucene/index/index.6.5.0-cfs.zip |  Bin 0 -> 15832 bytes
 .../apache/lucene/index/index.6.5.0-nocfs.zip   |  Bin 0 -> 15836 bytes
 .../byTask/feeds/EnwikiContentSourceTest.java   |    4 +-
 .../CachingNaiveBayesClassifierTest.java        |    2 +-
 .../SimpleNaiveBayesClassifierTest.java         |    2 +-
 .../bloom/BloomFilteringPostingsFormat.java     |    8 +-
 .../codecs/memory/MemoryPostingsFormat.java     |    2 +-
 .../simpletext/SimpleTextTermVectorsReader.java |    2 +-
 lucene/common-build.xml                         |    3 +
 .../CompressingTermVectorsReader.java           |    2 +-
 .../apache/lucene/index/IndexReaderContext.java |   10 +-
 .../org/apache/lucene/index/IndexWriter.java    |    2 +-
 .../apache/lucene/index/IndexWriterConfig.java  |    2 +-
 .../apache/lucene/index/ParallelLeafReader.java |    2 +-
 .../lucene/search/ConstantScoreQuery.java       |    2 +-
 .../org/apache/lucene/search/IndexSearcher.java |    1 -
 .../org/apache/lucene/search/ReqExclScorer.java |    2 +-
 .../org/apache/lucene/util/VirtualMethod.java   |    2 +-
 .../org/apache/lucene/index/TestCodecs.java     |    8 +-
 .../apache/lucene/index/TestCustomNorms.java    |   14 +-
 .../index/TestFlushByRamOrCountsPolicy.java     |    2 +-
 .../apache/lucene/index/TestIndexWriter.java    |    2 +-
 .../lucene/index/TestIndexWriterExceptions.java |    2 +-
 .../lucene/index/TestIndexWriterMerging.java    |    2 +-
 .../index/TestIndexWriterWithThreads.java       |    2 +-
 .../apache/lucene/index/TestIndexableField.java |    2 +-
 .../lucene/index/TestMaxTermFrequency.java      |    2 +-
 .../test/org/apache/lucene/index/TestNorms.java |   14 +-
 .../lucene/index/TestPerSegmentDeletes.java     |    2 +-
 .../lucene/index/TestSameTokenSamePosition.java |    4 +-
 .../lucene/index/TestStressIndexing2.java       |    2 +-
 .../lucene/index/TestTermVectorsReader.java     |    2 +-
 .../lucene/index/TestTransactionRollback.java   |    6 +-
 .../lucene/index/TestUniqueTermCount.java       |    2 +-
 .../lucene/search/TestAutomatonQuery.java       |    2 +-
 .../search/TestAutomatonQueryUnicode.java       |    2 +-
 .../lucene/search/TestCustomSearcherSort.java   |    4 +-
 .../TestEarlyTerminatingSortingCollector.java   |   16 +-
 .../apache/lucene/search/TestPrefixRandom.java  |    4 +-
 .../apache/lucene/search/TestRegexpQuery.java   |    2 +-
 .../apache/lucene/search/TestRegexpRandom2.java |    2 +-
 .../lucene/search/TestSearchWithThreads.java    |    2 +-
 .../lucene/search/TestSimilarityProvider.java   |    6 +-
 .../apache/lucene/search/TestTermScorer.java    |    2 +-
 .../search/TestTimeLimitingCollector.java       |    2 +-
 .../apache/lucene/store/TestLockFactory.java    |    4 +-
 .../apache/lucene/store/TestRAMDirectory.java   |   26 +-
 .../lucene/util/TestCloseableThreadLocal.java   |    2 +-
 .../apache/lucene/util/TestQueryBuilder.java    |    2 +-
 .../lucene/util/TestRamUsageEstimator.java      |    8 +-
 .../org/apache/lucene/util/fst/TestFSTs.java    |    2 +-
 .../taxonomy/writercache/CollisionMap.java      |    2 +-
 .../grouping/DistinctValuesCollectorTest.java   |   48 +-
 .../grouping/GroupFacetCollectorTest.java       |    4 +-
 .../uhighlight/LengthGoalBreakIteratorTest.java |   26 +-
 .../TestUnifiedHighlighterExtensibility.java    |    2 +-
 .../vectorhighlight/AbstractTestCase.java       |    6 +-
 lucene/ivy-ignore-conflicts.properties          |    1 -
 lucene/ivy-versions.properties                  |   59 -
 .../lucene/search/join/GlobalOrdinalsQuery.java |   16 +-
 .../join/GlobalOrdinalsWithScoreQuery.java      |   16 +-
 .../org/apache/lucene/search/join/JoinUtil.java |    8 +-
 .../search/DiversifiedTopDocsCollector.java     |    5 +-
 .../apache/lucene/queries/CustomScoreQuery.java |    2 +-
 .../lucene/queries/TestCustomScoreQuery.java    |    2 +-
 .../queries/payloads/TestPayloadSpans.java      |    6 +-
 .../surround/parser/QueryParser.java            |   41 +-
 .../queryparser/surround/parser/QueryParser.jj  |   45 +-
 .../queryparser/surround/query/FieldsQuery.java |    4 +-
 .../queryparser/classic/TestMultiAnalyzer.java  |    8 +-
 .../classic/TestMultiFieldQueryParser.java      |    2 +-
 .../standard/TestMultiAnalyzerQPHelper.java     |    8 +-
 .../flexible/standard/TestQPHelper.java         |    8 +-
 .../queryparser/util/QueryParserTestBase.java   |   10 +-
 .../sandbox/queries/FuzzyLikeThisQuery.java     |    2 +-
 .../lucene/payloads/TestPayloadSpanUtil.java    |    4 +-
 .../lucene/spatial3d/geom/GeoBBoxTest.java      |    2 +-
 .../search/spell/WordBreakSpellChecker.java     |   10 +-
 .../lucene/search/suggest/FileDictionary.java   |    8 +-
 .../search/suggest/document/ContextQuery.java   |    2 +-
 .../search/suggest/document/SuggestField.java   |    2 +-
 .../search/suggest/fst/ExternalRefSorter.java   |    2 +-
 .../suggest/fst/WFSTCompletionLookup.java       |    2 +-
 .../search/suggest/jaspell/JaspellLookup.java   |    8 +-
 .../jaspell/JaspellTernarySearchTrie.java       |    2 +-
 .../search/suggest/DocumentDictionaryTest.java  |    2 +-
 .../analyzing/AnalyzingInfixSuggesterTest.java  |    2 +-
 .../suggest/analyzing/FuzzySuggesterTest.java   |    2 +-
 .../mockrandom/MockRandomPostingsFormat.java    |    2 +-
 .../lucene/store/BaseLockFactoryTestCase.java   |    2 +-
 .../dependencies/GetMavenDependenciesTask.java  |    2 +-
 .../lucene/validation/LibVersionsCheckTask.java |    2 +-
 solr/CHANGES.txt                                |   71 +-
 solr/README.txt                                 |    2 +-
 solr/bin/solr                                   |   32 +-
 solr/bin/solr.cmd                               |   31 +-
 .../request/AnalyticsContentHandler.java        |   44 +-
 .../solr/analytics/request/AnalyticsStats.java  |    2 +-
 .../handler/clustering/ClusteringComponent.java |   64 +-
 .../clustering/solr/collection1/conf/schema.xml |   40 +-
 .../solr/collection1/conf/synonyms.txt          |    2 +-
 .../clustering/ClusteringComponentTest.java     |   68 +-
 .../carrot2/CarrotClusteringEngineTest.java     |    3 +-
 .../handler/dataimport/MailEntityProcessor.java |    4 +-
 .../conf/dataimport-schema-no-unique-key.xml    |   11 +-
 .../dataimport/EntityProcessorWrapper.java      |   23 +
 .../handler/dataimport/VariableResolver.java    |    2 +-
 .../handler/dataimport/XPathRecordReader.java   |   10 +-
 .../solr/collection1/conf/dataimport-schema.xml |   17 +-
 .../conf/dataimport-solr_id-schema.xml          |   29 +-
 .../dataimport/AbstractDIHCacheTestCase.java    |    4 +-
 .../AbstractSqlEntityProcessorTestCase.java     |    4 +-
 .../dataimport/TestContentStreamDataSource.java |    2 +-
 .../handler/dataimport/TestDocBuilder2.java     |   48 +-
 .../dataimport/TestHierarchicalDocBuilder.java  |   16 +-
 .../handler/extraction/XLSXResponseWriter.java  |    2 +-
 .../extraction/solr/collection1/conf/schema.xml |   61 +-
 .../org/apache/solr/ltr/LTRScoringQuery.java    |    2 +-
 .../test-files/solr/collection1/conf/schema.xml |    2 +-
 .../solr/collection1/conf/synonyms.txt          |    2 +-
 .../solr/ltr/TestLTRReRankingPipeline.java      |    2 +-
 solr/contrib/map-reduce/README.txt              |   20 -
 solr/contrib/map-reduce/build.xml               |  157 -
 solr/contrib/map-reduce/ivy.xml                 |   37 -
 .../map-reduce/src/java/assembly/hadoop-job.xml |   39 -
 .../solr/hadoop/AlphaNumericComparator.java     |   75 -
 .../org/apache/solr/hadoop/BatchWriter.java     |  243 --
 .../solr/hadoop/DataInputInputStream.java       |   58 -
 .../solr/hadoop/DataOutputOutputStream.java     |   66 -
 .../solr/hadoop/DryRunDocumentLoader.java       |   57 -
 .../src/java/org/apache/solr/hadoop/GoLive.java |  182 -
 .../apache/solr/hadoop/HdfsFileFieldNames.java  |   41 -
 .../org/apache/solr/hadoop/HeartBeater.java     |  159 -
 .../solr/hadoop/LineRandomizerMapper.java       |   67 -
 .../solr/hadoop/LineRandomizerReducer.java      |   48 -
 .../solr/hadoop/MapReduceIndexerTool.java       | 1388 -------
 .../apache/solr/hadoop/PathArgumentType.java    |  233 --
 .../java/org/apache/solr/hadoop/PathParts.java  |  130 -
 .../solr/hadoop/SolrCloudPartitioner.java       |  143 -
 .../org/apache/solr/hadoop/SolrCounters.java    |   53 -
 .../solr/hadoop/SolrInputDocumentWritable.java  |   66 -
 .../java/org/apache/solr/hadoop/SolrMapper.java |   39 -
 .../apache/solr/hadoop/SolrOutputFormat.java    |  280 --
 .../apache/solr/hadoop/SolrRecordWriter.java    |  479 ---
 .../org/apache/solr/hadoop/SolrReducer.java     |  188 -
 .../solr/hadoop/ToolRunnerHelpFormatter.java    |   90 -
 .../org/apache/solr/hadoop/TreeMergeMapper.java |   46 -
 .../solr/hadoop/TreeMergeOutputFormat.java      |  201 -
 .../hadoop/UnbufferedDataInputInputStream.java  |  114 -
 .../src/java/org/apache/solr/hadoop/Utils.java  |   59 -
 .../apache/solr/hadoop/ZooKeeperInspector.java  |  213 -
 .../dedup/NoChangeUpdateConflictResolver.java   |   36 -
 .../dedup/RejectingUpdateConflictResolver.java  |   48 -
 .../RetainMostRecentUpdateConflictResolver.java |  114 -
 .../dedup/SolrInputDocumentComparator.java      |   84 -
 .../dedup/SortingUpdateConflictResolver.java    |   79 -
 .../hadoop/dedup/UpdateConflictResolver.java    |   71 -
 .../apache/solr/hadoop/dedup/package-info.java  |   25 -
 .../hadoop/morphline/MorphlineCounters.java     |   47 -
 .../hadoop/morphline/MorphlineMapRunner.java    |  268 --
 .../solr/hadoop/morphline/MorphlineMapper.java  |  193 -
 .../solr/hadoop/morphline/package-info.java     |   25 -
 .../org/apache/solr/hadoop/package-info.java    |   25 -
 solr/contrib/map-reduce/src/java/overview.html  |   21 -
 .../map-reduce/src/test-files/README.txt        |    1 -
 .../solr/hadoop/AlphaNumericComparatorTest.java |   46 -
 .../org/apache/solr/hadoop/IdentityMapper.java  |   38 -
 .../org/apache/solr/hadoop/IdentityReducer.java |   37 -
 .../hadoop/LineRandomizerMapperReducerTest.java |   94 -
 .../test/org/apache/solr/hadoop/MRUnitBase.java |   64 -
 .../MapReduceIndexerToolArgumentParserTest.java |  468 ---
 .../solr/hadoop/MorphlineBasicMiniMRTest.java   |  415 --
 .../solr/hadoop/MorphlineGoLiveMiniMRTest.java  |  881 ----
 .../apache/solr/hadoop/MorphlineMapperTest.java |   76 -
 .../solr/hadoop/MorphlineReducerTest.java       |  131 -
 .../org/apache/solr/hadoop/UtilsForTests.java   |   57 -
 .../solr/hadoop/hack/MiniMRClientCluster.java   |   41 -
 .../hadoop/hack/MiniMRClientClusterFactory.java |   88 -
 .../apache/solr/hadoop/hack/MiniMRCluster.java  |  266 --
 .../solr/hadoop/hack/MiniMRYarnCluster.java     |  205 -
 .../hadoop/hack/MiniMRYarnClusterAdapter.java   |   78 -
 .../solr/hadoop/hack/MiniYARNCluster.java       |  409 --
 solr/contrib/morphlines-cell/README.txt         |    6 -
 solr/contrib/morphlines-cell/build.xml          |  144 -
 solr/contrib/morphlines-cell/ivy.xml            |   35 -
 .../solr/morphlines/cell/SolrCellBuilder.java   |  348 --
 .../StripNonCharSolrContentHandlerFactory.java  |   81 -
 .../cell/TrimSolrContentHandlerFactory.java     |   58 -
 .../solr/morphlines/cell/package-info.java      |   25 -
 .../morphlines-cell/src/java/overview.html      |   21 -
 .../morphlines-cell/src/test-files/README.txt   |    1 -
 .../morphlines/cell/SolrCellMorphlineTest.java  |  292 --
 solr/contrib/morphlines-core/README.txt         |    6 -
 solr/contrib/morphlines-core/build.xml          |  105 -
 solr/contrib/morphlines-core/ivy.xml            |  128 -
 .../solr/morphlines/solr/DocumentLoader.java    |   73 -
 .../apache/solr/morphlines/solr/FileUtils.java  |  140 -
 .../solr/GenerateSolrSequenceKeyBuilder.java    |  143 -
 .../solr/morphlines/solr/LoadSolrBuilder.java   |  153 -
 .../solr/SafeConcurrentUpdateSolrClient.java    |   70 -
 .../solr/SanitizeUnknownSolrFieldsBuilder.java  |  101 -
 .../solr/SolrClientDocumentLoader.java          |  124 -
 .../solr/morphlines/solr/SolrLocator.java       |  254 --
 .../morphlines/solr/SolrMorphlineContext.java   |   80 -
 .../morphlines/solr/TokenizeTextBuilder.java    |  154 -
 .../morphlines/solr/ZooKeeperDownloader.java    |  142 -
 .../solr/morphlines/solr/package-info.java      |   25 -
 .../morphlines-core/src/java/overview.html      |   21 -
 .../src/test-files/custom-mimetypes.xml         |   38 -
 .../src/test-files/log4j.properties             |    7 -
 .../src/test-files/morphlines-core.marker       |    1 -
 .../solr/collection1/conf/currency.xml          |   67 -
 .../solr/collection1/conf/elevate.xml           |   42 -
 .../collection1/conf/lang/contractions_ca.txt   |    8 -
 .../collection1/conf/lang/contractions_fr.txt   |    9 -
 .../collection1/conf/lang/contractions_ga.txt   |    5 -
 .../collection1/conf/lang/contractions_it.txt   |   23 -
 .../collection1/conf/lang/hyphenations_ga.txt   |    5 -
 .../solr/collection1/conf/lang/stemdict_nl.txt  |    6 -
 .../solr/collection1/conf/lang/stoptags_ja.txt  |  420 --
 .../solr/collection1/conf/lang/stopwords_ar.txt |  125 -
 .../solr/collection1/conf/lang/stopwords_bg.txt |  193 -
 .../solr/collection1/conf/lang/stopwords_ca.txt |  220 -
 .../solr/collection1/conf/lang/stopwords_cz.txt |  172 -
 .../solr/collection1/conf/lang/stopwords_da.txt |  108 -
 .../solr/collection1/conf/lang/stopwords_de.txt |  292 --
 .../solr/collection1/conf/lang/stopwords_el.txt |   78 -
 .../solr/collection1/conf/lang/stopwords_en.txt |   54 -
 .../solr/collection1/conf/lang/stopwords_es.txt |  354 --
 .../solr/collection1/conf/lang/stopwords_eu.txt |   99 -
 .../solr/collection1/conf/lang/stopwords_fa.txt |  313 --
 .../solr/collection1/conf/lang/stopwords_fi.txt |   95 -
 .../solr/collection1/conf/lang/stopwords_fr.txt |  183 -
 .../solr/collection1/conf/lang/stopwords_ga.txt |  110 -
 .../solr/collection1/conf/lang/stopwords_gl.txt |  161 -
 .../solr/collection1/conf/lang/stopwords_hi.txt |  235 --
 .../solr/collection1/conf/lang/stopwords_hu.txt |  209 -
 .../solr/collection1/conf/lang/stopwords_hy.txt |   46 -
 .../solr/collection1/conf/lang/stopwords_id.txt |  359 --
 .../solr/collection1/conf/lang/stopwords_it.txt |  301 --
 .../solr/collection1/conf/lang/stopwords_ja.txt |  127 -
 .../solr/collection1/conf/lang/stopwords_lv.txt |  172 -
 .../solr/collection1/conf/lang/stopwords_nl.txt |  117 -
 .../solr/collection1/conf/lang/stopwords_no.txt |  192 -
 .../solr/collection1/conf/lang/stopwords_pt.txt |  251 --
 .../solr/collection1/conf/lang/stopwords_ro.txt |  233 --
 .../solr/collection1/conf/lang/stopwords_ru.txt |  241 --
 .../solr/collection1/conf/lang/stopwords_sv.txt |  131 -
 .../solr/collection1/conf/lang/stopwords_th.txt |  119 -
 .../solr/collection1/conf/lang/stopwords_tr.txt |  212 -
 .../solr/collection1/conf/lang/userdict_ja.txt  |   29 -
 .../solr/collection1/conf/protwords.txt         |   21 -
 .../test-files/solr/collection1/conf/schema.xml |  927 -----
 .../solr/collection1/conf/solrconfig.xml        | 1426 -------
 .../solr/collection1/conf/stopwords.txt         |   14 -
 .../solr/collection1/conf/synonyms.txt          |   29 -
 .../test-files/solr/minimr/conf/currency.xml    |   67 -
 .../src/test-files/solr/minimr/conf/elevate.xml |   42 -
 .../solr/minimr/conf/lang/contractions_ca.txt   |    8 -
 .../solr/minimr/conf/lang/contractions_fr.txt   |    9 -
 .../solr/minimr/conf/lang/contractions_ga.txt   |    5 -
 .../solr/minimr/conf/lang/contractions_it.txt   |   23 -
 .../solr/minimr/conf/lang/hyphenations_ga.txt   |    5 -
 .../solr/minimr/conf/lang/stemdict_nl.txt       |    6 -
 .../solr/minimr/conf/lang/stoptags_ja.txt       |  420 --
 .../solr/minimr/conf/lang/stopwords_ar.txt      |  125 -
 .../solr/minimr/conf/lang/stopwords_bg.txt      |  193 -
 .../solr/minimr/conf/lang/stopwords_ca.txt      |  220 -
 .../solr/minimr/conf/lang/stopwords_cz.txt      |  172 -
 .../solr/minimr/conf/lang/stopwords_da.txt      |  108 -
 .../solr/minimr/conf/lang/stopwords_de.txt      |  292 --
 .../solr/minimr/conf/lang/stopwords_el.txt      |   78 -
 .../solr/minimr/conf/lang/stopwords_en.txt      |   54 -
 .../solr/minimr/conf/lang/stopwords_es.txt      |  354 --
 .../solr/minimr/conf/lang/stopwords_eu.txt      |   99 -
 .../solr/minimr/conf/lang/stopwords_fa.txt      |  313 --
 .../solr/minimr/conf/lang/stopwords_fi.txt      |   95 -
 .../solr/minimr/conf/lang/stopwords_fr.txt      |  183 -
 .../solr/minimr/conf/lang/stopwords_ga.txt      |  110 -
 .../solr/minimr/conf/lang/stopwords_gl.txt      |  161 -
 .../solr/minimr/conf/lang/stopwords_hi.txt      |  235 --
 .../solr/minimr/conf/lang/stopwords_hu.txt      |  209 -
 .../solr/minimr/conf/lang/stopwords_hy.txt      |   46 -
 .../solr/minimr/conf/lang/stopwords_id.txt      |  359 --
 .../solr/minimr/conf/lang/stopwords_it.txt      |  301 --
 .../solr/minimr/conf/lang/stopwords_ja.txt      |  127 -
 .../solr/minimr/conf/lang/stopwords_lv.txt      |  172 -
 .../solr/minimr/conf/lang/stopwords_nl.txt      |  117 -
 .../solr/minimr/conf/lang/stopwords_no.txt      |  192 -
 .../solr/minimr/conf/lang/stopwords_pt.txt      |  251 --
 .../solr/minimr/conf/lang/stopwords_ro.txt      |  233 --
 .../solr/minimr/conf/lang/stopwords_ru.txt      |  241 --
 .../solr/minimr/conf/lang/stopwords_sv.txt      |  131 -
 .../solr/minimr/conf/lang/stopwords_th.txt      |  119 -
 .../solr/minimr/conf/lang/stopwords_tr.txt      |  212 -
 .../solr/minimr/conf/lang/userdict_ja.txt       |   29 -
 .../test-files/solr/minimr/conf/protwords.txt   |   21 -
 .../src/test-files/solr/minimr/conf/schema.xml  |  941 -----
 .../test-files/solr/minimr/conf/solrconfig.xml  | 1446 -------
 .../test-files/solr/minimr/conf/stopwords.txt   |   14 -
 .../test-files/solr/minimr/conf/synonyms.txt    |   29 -
 .../src/test-files/solr/minimr/solr.xml         |   46 -
 .../test-files/solr/mrunit/conf/currency.xml    |   67 -
 .../src/test-files/solr/mrunit/conf/elevate.xml |   42 -
 .../solr/mrunit/conf/lang/contractions_ca.txt   |    8 -
 .../solr/mrunit/conf/lang/contractions_fr.txt   |    9 -
 .../solr/mrunit/conf/lang/contractions_ga.txt   |    5 -
 .../solr/mrunit/conf/lang/contractions_it.txt   |   23 -
 .../solr/mrunit/conf/lang/hyphenations_ga.txt   |    5 -
 .../solr/mrunit/conf/lang/stemdict_nl.txt       |    6 -
 .../solr/mrunit/conf/lang/stoptags_ja.txt       |  420 --
 .../solr/mrunit/conf/lang/stopwords_ar.txt      |  125 -
 .../solr/mrunit/conf/lang/stopwords_bg.txt      |  193 -
 .../solr/mrunit/conf/lang/stopwords_ca.txt      |  220 -
 .../solr/mrunit/conf/lang/stopwords_cz.txt      |  172 -
 .../solr/mrunit/conf/lang/stopwords_da.txt      |  108 -
 .../solr/mrunit/conf/lang/stopwords_de.txt      |  292 --
 .../solr/mrunit/conf/lang/stopwords_el.txt      |   78 -
 .../solr/mrunit/conf/lang/stopwords_en.txt      |   54 -
 .../solr/mrunit/conf/lang/stopwords_es.txt      |  354 --
 .../solr/mrunit/conf/lang/stopwords_eu.txt      |   99 -
 .../solr/mrunit/conf/lang/stopwords_fa.txt      |  313 --
 .../solr/mrunit/conf/lang/stopwords_fi.txt      |   95 -
 .../solr/mrunit/conf/lang/stopwords_fr.txt      |  183 -
 .../solr/mrunit/conf/lang/stopwords_ga.txt      |  110 -
 .../solr/mrunit/conf/lang/stopwords_gl.txt      |  161 -
 .../solr/mrunit/conf/lang/stopwords_hi.txt      |  235 --
 .../solr/mrunit/conf/lang/stopwords_hu.txt      |  209 -
 .../solr/mrunit/conf/lang/stopwords_hy.txt      |   46 -
 .../solr/mrunit/conf/lang/stopwords_id.txt      |  359 --
 .../solr/mrunit/conf/lang/stopwords_it.txt      |  301 --
 .../solr/mrunit/conf/lang/stopwords_ja.txt      |  127 -
 .../solr/mrunit/conf/lang/stopwords_lv.txt      |  172 -
 .../solr/mrunit/conf/lang/stopwords_nl.txt      |  117 -
 .../solr/mrunit/conf/lang/stopwords_no.txt      |  192 -
 .../solr/mrunit/conf/lang/stopwords_pt.txt      |  251 --
 .../solr/mrunit/conf/lang/stopwords_ro.txt      |  233 --
 .../solr/mrunit/conf/lang/stopwords_ru.txt      |  241 --
 .../solr/mrunit/conf/lang/stopwords_sv.txt      |  131 -
 .../solr/mrunit/conf/lang/stopwords_th.txt      |  119 -
 .../solr/mrunit/conf/lang/stopwords_tr.txt      |  212 -
 .../solr/mrunit/conf/lang/userdict_ja.txt       |   29 -
 .../test-files/solr/mrunit/conf/protwords.txt   |   21 -
 .../src/test-files/solr/mrunit/conf/schema.xml  |  940 -----
 .../test-files/solr/mrunit/conf/solrconfig.xml  | 1449 -------
 .../test-files/solr/mrunit/conf/stopwords.txt   |   14 -
 .../test-files/solr/mrunit/conf/synonyms.txt    |   29 -
 .../src/test-files/solr/mrunit/solr.xml         |   45 -
 .../src/test-files/solr/solr.xml                |   34 -
 .../solrcelltest/collection1/conf/currency.xml  |   67 -
 .../solrcelltest/collection1/conf/elevate.xml   |   42 -
 .../collection1/conf/lang/contractions_ca.txt   |    8 -
 .../collection1/conf/lang/contractions_fr.txt   |    9 -
 .../collection1/conf/lang/contractions_ga.txt   |    5 -
 .../collection1/conf/lang/contractions_it.txt   |   23 -
 .../collection1/conf/lang/hyphenations_ga.txt   |    5 -
 .../collection1/conf/lang/stemdict_nl.txt       |    6 -
 .../collection1/conf/lang/stoptags_ja.txt       |  420 --
 .../collection1/conf/lang/stopwords_ar.txt      |  125 -
 .../collection1/conf/lang/stopwords_bg.txt      |  193 -
 .../collection1/conf/lang/stopwords_ca.txt      |  220 -
 .../collection1/conf/lang/stopwords_cz.txt      |  172 -
 .../collection1/conf/lang/stopwords_da.txt      |  108 -
 .../collection1/conf/lang/stopwords_de.txt      |  292 --
 .../collection1/conf/lang/stopwords_el.txt      |   78 -
 .../collection1/conf/lang/stopwords_en.txt      |   54 -
 .../collection1/conf/lang/stopwords_es.txt      |  354 --
 .../collection1/conf/lang/stopwords_eu.txt      |   99 -
 .../collection1/conf/lang/stopwords_fa.txt      |  313 --
 .../collection1/conf/lang/stopwords_fi.txt      |   95 -
 .../collection1/conf/lang/stopwords_fr.txt      |  183 -
 .../collection1/conf/lang/stopwords_ga.txt      |  110 -
 .../collection1/conf/lang/stopwords_gl.txt      |  161 -
 .../collection1/conf/lang/stopwords_hi.txt      |  235 --
 .../collection1/conf/lang/stopwords_hu.txt      |  209 -
 .../collection1/conf/lang/stopwords_hy.txt      |   46 -
 .../collection1/conf/lang/stopwords_id.txt      |  359 --
 .../collection1/conf/lang/stopwords_it.txt      |  301 --
 .../collection1/conf/lang/stopwords_ja.txt      |  127 -
 .../collection1/conf/lang/stopwords_lv.txt      |  172 -
 .../collection1/conf/lang/stopwords_nl.txt      |  117 -
 .../collection1/conf/lang/stopwords_no.txt      |  192 -
 .../collection1/conf/lang/stopwords_pt.txt      |  251 --
 .../collection1/conf/lang/stopwords_ro.txt      |  233 --
 .../collection1/conf/lang/stopwords_ru.txt      |  241 --
 .../collection1/conf/lang/stopwords_sv.txt      |  131 -
 .../collection1/conf/lang/stopwords_th.txt      |  119 -
 .../collection1/conf/lang/stopwords_tr.txt      |  212 -
 .../collection1/conf/lang/userdict_ja.txt       |   29 -
 .../solrcelltest/collection1/conf/protwords.txt |   21 -
 .../solrcelltest/collection1/conf/schema.xml    |  893 ----
 .../collection1/conf/solrconfig.xml             | 1426 -------
 .../solrcelltest/collection1/conf/stopwords.txt |   14 -
 .../solrcelltest/collection1/conf/synonyms.txt  |   29 -
 .../solr/solrcloud/conf/solrconfig.xml          | 1437 -------
 .../test-files/test-documents/NullHeader.docx   |  Bin 4355 -> 0 bytes
 .../test-files/test-documents/boilerplate.html  |   58 -
 .../src/test-files/test-documents/cars.csv      |    6 -
 .../src/test-files/test-documents/cars.csv.gz   |  Bin 167 -> 0 bytes
 .../src/test-files/test-documents/cars.tar.gz   |  Bin 298 -> 0 bytes
 .../src/test-files/test-documents/complex.mbox  |  291 --
 .../src/test-files/test-documents/email.eml     |   40 -
 .../src/test-files/test-documents/rsstest.rss   |   36 -
 .../sample-statuses-20120521-100919.avro        |  Bin 3192 -> 0 bytes
 .../sample-statuses-20120906-141433             |    4 -
 .../sample-statuses-20120906-141433-medium.avro |  Bin 249540 -> 0 bytes
 .../sample-statuses-20120906-141433.avro        |  Bin 1208 -> 0 bytes
 .../sample-statuses-20120906-141433.bz2         |  Bin 1054 -> 0 bytes
 .../sample-statuses-20120906-141433.gz          |  Bin 907 -> 0 bytes
 .../test-files/test-documents/test-outlook.msg  |  Bin 19968 -> 0 bytes
 .../src/test-files/test-documents/testAIFF.aif  |  Bin 3894 -> 0 bytes
 .../src/test-files/test-documents/testBMP.bmp   |  Bin 22554 -> 0 bytes
 .../src/test-files/test-documents/testBMPfp.txt |    3 -
 .../src/test-files/test-documents/testEMLX.emlx |   72 -
 .../src/test-files/test-documents/testEXCEL.xls |  Bin 13824 -> 0 bytes
 .../test-files/test-documents/testEXCEL.xlsx    |  Bin 9453 -> 0 bytes
 .../src/test-files/test-documents/testFLAC.flac |  Bin 10604 -> 0 bytes
 .../src/test-files/test-documents/testFLV.flv   |  Bin 90580 -> 0 bytes
 .../test-files/test-documents/testJPEG_EXIF.jpg |  Bin 16357 -> 0 bytes
 .../test-documents/testJPEG_EXIF.jpg.gz         |  Bin 8595 -> 0 bytes
 .../test-documents/testJPEG_EXIF.jpg.tar.gz     |  Bin 8722 -> 0 bytes
 .../test-files/test-documents/testMP3i18n.mp3   |  Bin 40832 -> 0 bytes
 .../src/test-files/test-documents/testMP4.m4a   |  Bin 4770 -> 0 bytes
 .../src/test-files/test-documents/testPDF.pdf   |  Bin 34824 -> 0 bytes
 .../src/test-files/test-documents/testPNG.png   |  Bin 17041 -> 0 bytes
 .../test-documents/testPPT_various.ppt          |  Bin 164352 -> 0 bytes
 .../test-documents/testPPT_various.pptx         |  Bin 56659 -> 0 bytes
 .../src/test-files/test-documents/testPSD.psd   |  Bin 69410 -> 0 bytes
 .../test-files/test-documents/testPages.pages   |  Bin 134152 -> 0 bytes
 .../src/test-files/test-documents/testRFC822    |   41 -
 .../test-documents/testRTFVarious.rtf           |  329 --
 .../src/test-files/test-documents/testSVG.svg   |   23 -
 .../src/test-files/test-documents/testTIFF.tif  |  Bin 25584 -> 0 bytes
 .../src/test-files/test-documents/testVISIO.vsd |  Bin 45568 -> 0 bytes
 .../src/test-files/test-documents/testWAV.wav   |  Bin 3884 -> 0 bytes
 .../test-documents/testWORD_various.doc         |  Bin 35328 -> 0 bytes
 .../src/test-files/test-documents/testXML.xml   |   48 -
 .../src/test-files/test-documents/testXML2.xml  |   22 -
 .../test-morphlines/loadSolrBasic.conf          |   68 -
 .../test-morphlines/solrCellDocumentTypes.conf  |  304 --
 .../test-morphlines/solrCellJPGCompressed.conf  |  150 -
 .../test-files/test-morphlines/solrCellXML.conf |   78 -
 .../test-morphlines/tokenizeText.conf           |   38 -
 .../tutorialReadAvroContainer.conf              |  145 -
 .../solr/AbstractSolrMorphlineTestBase.java     |  318 --
 .../solr/AbstractSolrMorphlineZkTestBase.java   |  141 -
 .../solr/CollectingDocumentLoader.java          |   95 -
 .../morphlines/solr/EmbeddedTestSolrServer.java |   46 -
 .../solr/morphlines/solr/SolrMorphlineTest.java |   69 -
 .../solr/SolrMorphlineZkAliasTest.java          |   96 -
 .../solr/SolrMorphlineZkAvroTest.java           |  140 -
 .../morphlines/solr/SolrMorphlineZkTest.java    |   92 -
 .../uima/solr/collection1/conf/schema.xml       |   47 +-
 .../uima/solr/collection1/conf/synonyms.txt     |    2 +-
 .../test-files/uima/uima-tokenizers-schema.xml  |   36 +-
 .../solr/response/VelocityResponseWriter.java   |    2 +-
 .../org/apache/solr/cloud/DistributedMap.java   |   10 +-
 .../java/org/apache/solr/cloud/Overseer.java    |    2 +-
 .../apache/solr/cloud/OverseerTaskQueue.java    |    8 +-
 .../org/apache/solr/cloud/RecoveryStrategy.java |    2 +-
 .../org/apache/solr/cloud/ZkController.java     |    6 +-
 .../apache/solr/cloud/ZkSolrResourceLoader.java |    2 +-
 .../solr/core/CachingDirectoryFactory.java      |    2 +-
 .../java/org/apache/solr/core/ConfigSet.java    |   10 +-
 .../org/apache/solr/core/ConfigSetService.java  |   29 +-
 .../org/apache/solr/core/CoreContainer.java     |    9 +-
 .../org/apache/solr/core/CoreDescriptor.java    |   17 +
 .../apache/solr/core/RunExecutableListener.java |   13 +-
 .../src/java/org/apache/solr/core/SolrCore.java |    1 +
 .../core/SolrCoreInitializationException.java   |   32 +
 .../apache/solr/handler/AnalyzeEvaluator.java   |  111 +
 .../org/apache/solr/handler/CdcrReplicator.java |    2 +-
 .../solr/handler/CdcrReplicatorState.java       |    6 +-
 .../apache/solr/handler/CdcrRequestHandler.java |    2 +-
 .../org/apache/solr/handler/ExportWriter.java   |   18 +-
 .../org/apache/solr/handler/IndexFetcher.java   |   85 +-
 .../apache/solr/handler/ReplicationHandler.java |   10 +-
 .../org/apache/solr/handler/SQLHandler.java     |    2 +-
 .../org/apache/solr/handler/StreamHandler.java  |   82 +-
 .../solr/handler/admin/ConfigSetsHandler.java   |   93 +-
 .../solr/handler/component/ExpandComponent.java |   81 +-
 .../component/HttpShardHandlerFactory.java      |    2 +-
 .../PivotFacetFieldValueCollection.java         |    4 +-
 .../handler/component/RealTimeGetComponent.java |   22 +-
 .../solr/handler/component/TermsComponent.java  |   68 +-
 .../apache/solr/handler/loader/XMLLoader.java   |    7 +
 .../apache/solr/handler/sql/SolrAggregate.java  |    6 +-
 .../apache/solr/handler/sql/SolrEnumerator.java |    1 +
 .../apache/solr/handler/sql/SolrProject.java    |    2 +-
 .../org/apache/solr/handler/sql/SolrRel.java    |   10 +-
 .../org/apache/solr/handler/sql/SolrSchema.java |    4 +
 .../org/apache/solr/handler/sql/SolrTable.java  |   43 +-
 .../handler/sql/SolrToEnumerableConverter.java  |    1 +
 .../solr/highlight/DefaultSolrHighlighter.java  |    5 +-
 .../apache/solr/highlight/SolrHighlighter.java  |    2 +-
 .../solr/internal/csv/writer/CSVConfig.java     |    2 +-
 .../internal/csv/writer/CSVConfigGuesser.java   |    8 +-
 .../org/apache/solr/parser/QueryParser.java     |   11 +-
 .../java/org/apache/solr/parser/QueryParser.jj  |   11 +-
 .../org/apache/solr/request/SimpleFacets.java   |   16 +-
 .../org/apache/solr/response/DocsStreamer.java  |   76 +-
 .../solr/response/TextResponseWriter.java       |    2 +-
 .../transform/ChildDocTransformerFactory.java   |    2 +-
 .../org/apache/solr/schema/CurrencyField.java   |    2 +-
 .../apache/solr/schema/ManagedIndexSchema.java  |    4 +-
 .../org/apache/solr/schema/RandomSortField.java |    2 +-
 .../solr/search/ComplexPhraseQParserPlugin.java |    4 +-
 .../apache/solr/search/ExportQParserPlugin.java |    2 +-
 .../solr/search/ExtendedDismaxQParser.java      |   12 +-
 .../apache/solr/search/HashQParserPlugin.java   |   18 +-
 .../QueryParserConfigurationException.java      |   24 -
 .../org/apache/solr/search/ReRankCollector.java |    2 +-
 .../apache/solr/search/ReRankQParserPlugin.java |    2 +-
 .../apache/solr/search/SolrDocumentFetcher.java |  571 +++
 .../apache/solr/search/SolrIndexSearcher.java   |  572 +--
 .../solr/search/facet/FacetFieldMerger.java     |   27 +-
 .../solr/search/facet/FacetFieldProcessor.java  |   29 +-
 .../search/facet/FacetRequestSortedMerger.java  |   24 +-
 .../org/apache/solr/search/facet/UniqueAgg.java |    2 +-
 .../search/function/CollapseScoreFunction.java  |    2 +-
 .../TopGroupsResultTransformer.java             |    6 +-
 .../org/apache/solr/search/join/GraphQuery.java |    2 +-
 .../solr/security/PermissionNameProvider.java   |    2 +-
 .../solr/spelling/PossibilityIterator.java      |    6 +-
 .../solr/spelling/SpellCheckCollation.java      |    2 +-
 .../org/apache/solr/update/CommitTracker.java   |    6 +-
 .../org/apache/solr/update/UpdateHandler.java   |   16 +
 .../java/org/apache/solr/update/UpdateLog.java  |    2 +-
 .../StatelessScriptUpdateProcessorFactory.java  |    6 +
 .../src/java/org/apache/solr/util/RTimer.java   |    2 +-
 .../org/apache/solr/util/SimplePostTool.java    |    6 +-
 .../org/apache/solr/util/SolrPluginUtils.java   |    7 +-
 .../org/apache/solr/util/TestInjection.java     |   26 +-
 .../conf/schema-HighlighterMaxOffsetTest.xml    |    9 +-
 .../collection1/conf/schema-copyfield-test.xml  |   50 +-
 .../collection1/conf/schema-docValuesJoin.xml   |    7 +-
 .../solr/collection1/conf/schema-folding.xml    |    2 +-
 .../solr/collection1/conf/schema-hash.xml       |   59 +-
 .../collection1/conf/schema-psuedo-fields.xml   |    7 +-
 .../collection1/conf/schema-required-fields.xml |   39 +-
 .../solr/collection1/conf/schema-rest.xml       |   97 +-
 .../conf/schema-single-dynamic-copy-field.xml   |   97 +-
 .../collection1/conf/schema-spellchecker.xml    |    2 +-
 .../solr/collection1/conf/schema-sql.xml        |   59 +-
 .../conf/schema-synonym-tokenizer.xml           |    5 +-
 .../solr/collection1/conf/schema-trie.xml       |   29 +-
 .../test-files/solr/collection1/conf/schema.xml |   81 +-
 .../solr/collection1/conf/schema11.xml          |   37 +-
 .../solr/collection1/conf/schema12.xml          |   92 +-
 .../solr/collection1/conf/schema15.xml          |   92 +-
 .../solr/collection1/conf/schema_latest.xml     |   45 +-
 .../solr/collection1/conf/schemasurround.xml    |   99 +-
 .../solr/collection1/conf/synonyms.txt          |    3 +-
 .../solr/collection1/conf/wdftypes.txt          |    2 +-
 .../configsets/cloud-dynamic/conf/schema.xml    |   35 +-
 .../solr/configsets/doc-expiry/conf/schema.xml  |   35 +-
 .../dih-script-transformer/managed-schema       |   25 +
 .../dih-script-transformer/solrconfig.xml       |   61 +
 .../configsets/upload/regular/managed-schema    |   25 +
 .../configsets/upload/regular/solrconfig.xml    |   61 +
 .../regular/xslt/xsl-update-handler-test.xsl    |   49 +
 .../with-run-executable-listener/managed-schema |   25 +
 .../with-run-executable-listener/solrconfig.xml |   69 +
 .../upload/with-script-processor/managed-schema |   25 +
 ...missleading.extension.updateprocessor.js.txt |   23 +
 .../upload/with-script-processor/solrconfig.xml |   65 +
 .../test/SecureRandomAlgorithmTesterApp.java    |   41 +
 .../org/apache/solr/ConvertedLegacyTest.java    |    2 +-
 .../solr/cloud/BaseCdcrDistributedZkTest.java   |    4 +-
 .../cloud/ChaosMonkeyNothingIsSafeTest.java     |    4 +-
 .../solr/cloud/ConnectionManagerTest.java       |    2 +-
 .../apache/solr/cloud/DistributedQueueTest.java |    2 +-
 .../apache/solr/cloud/LeaderElectionTest.java   |    4 +-
 ...rriddenZkACLAndCredentialsProvidersTest.java |    4 +-
 ...verseerCollectionConfigSetProcessorTest.java |    2 +-
 .../org/apache/solr/cloud/OverseerTest.java     |  134 +-
 .../cloud/SegmentTerminateEarlyTestState.java   |   78 +-
 .../apache/solr/cloud/SolrCLIZkUtilsTest.java   |  100 +-
 .../org/apache/solr/cloud/SolrXmlInZkTest.java  |    2 +-
 .../apache/solr/cloud/TestConfigSetsAPI.java    |  401 +-
 .../apache/solr/cloud/TestRandomFlRTGCloud.java |    4 +-
 .../apache/solr/cloud/TestSegmentSorting.java   |    2 +-
 .../test/org/apache/solr/cloud/ZkCLITest.java   |    4 +-
 .../org/apache/solr/cloud/ZkSolrClientTest.java |    2 +-
 .../solr/core/CachingDirectoryFactoryTest.java  |    2 +-
 .../solr/core/OpenCloseCoreStressTest.java      |    6 +-
 .../org/apache/solr/core/TestCodecSupport.java  |    2 +-
 .../apache/solr/core/TestDynamicLoading.java    |   11 +-
 .../handler/AnalysisRequestHandlerTestBase.java |    2 +-
 .../FieldAnalysisRequestHandlerTest.java        |   14 +-
 .../solr/handler/PingRequestHandlerTest.java    |    2 +-
 .../org/apache/solr/handler/TestSQLHandler.java |  250 +-
 .../handler/XmlUpdateRequestHandlerTest.java    |    2 +-
 .../admin/CoreMergeIndexesAdminHandlerTest.java |    2 +-
 .../DistributedFacetPivotSmallTest.java         |    5 +-
 .../DistributedTermsComponentTest.java          |    3 +-
 .../handler/component/TermsComponentTest.java   |   38 +
 .../handler/component/TestExpandComponent.java  |    2 -
 .../apache/solr/internal/csv/CSVParserTest.java |    2 +-
 .../reporters/SolrGraphiteReporterTest.java     |    2 +-
 .../response/TestGraphMLResponseWriter.java     |    2 +-
 .../solr/response/TestSolrQueryResponse.java    |    2 +-
 .../apache/solr/rest/TestManagedResource.java   |    4 +-
 .../org/apache/solr/rest/TestRestManager.java   |    2 +-
 .../solr/rest/schema/TestBulkSchemaAPI.java     |   29 +-
 .../apache/solr/schema/NumericFieldsTest.java   |  108 +-
 .../solr/schema/SpatialRPTFieldTypeTest.java    |   12 +-
 .../TestCloudManagedSchemaConcurrent.java       |    2 +-
 .../TrieIntPrefixActsAsRangeQueryFieldType.java |    2 +-
 .../solr/search/AnalyticsTestQParserPlugin.java |   10 +-
 .../org/apache/solr/search/LargeFieldTest.java  |    8 +-
 .../solr/search/TestExtendedDismaxParser.java   |  151 +-
 .../org/apache/solr/search/TestFiltering.java   |    2 +-
 .../org/apache/solr/search/TestRTGBase.java     |    2 +-
 .../apache/solr/search/TestRankQueryPlugin.java |   14 +-
 .../org/apache/solr/search/TestRealTimeGet.java |    4 +-
 .../apache/solr/search/TestReloadDeadlock.java  |    2 +-
 .../apache/solr/search/TestSolrQueryParser.java |   48 +-
 .../apache/solr/search/TestStressLucene.java    |    8 +-
 .../apache/solr/search/TestStressRecovery.java  |    4 +-
 .../apache/solr/search/TestStressReorder.java   |    4 +-
 .../solr/search/TestStressUserVersions.java     |    6 +-
 .../apache/solr/search/TestStressVersions.java  |    4 +-
 .../search/facet/TestJsonFacetRefinement.java   |  103 +-
 .../ConjunctionSolrSpellCheckerTest.java        |    4 +-
 .../spelling/IndexBasedSpellCheckerTest.java    |    2 +-
 .../store/blockcache/BlockDirectoryTest.java    |    2 +-
 .../solr/update/TestInPlaceUpdatesDistrib.java  |   22 +-
 .../apache/solr/util/SimplePostToolTest.java    |    2 +-
 .../apache/solr/util/SolrPluginUtilsTest.java   |   89 +-
 .../apache/solr/util/TestSolrCLIRunExample.java |    2 +-
 .../apache/solr/util/stats/MetricUtilsTest.java |    6 +
 solr/example/README.txt                         |    2 +-
 solr/example/example-DIH/README.txt             |   11 +-
 .../solr/atom/conf/atom-data-config.xml         |   35 +
 .../solr/atom/conf/lang/stopwords_en.txt        |   54 +
 .../example-DIH/solr/atom/conf/managed-schema   |  106 +
 .../example-DIH/solr/atom/conf/protwords.txt    |   17 +
 .../example-DIH/solr/atom/conf/solrconfig.xml   |   61 +
 .../example-DIH/solr/atom/conf/synonyms.txt     |   29 +
 .../example-DIH/solr/atom/conf/url_types.txt    |    1 +
 .../example-DIH/solr/atom/core.properties       |    0
 .../example-DIH/solr/db/conf/managed-schema     |   44 +-
 .../example-DIH/solr/db/conf/synonyms.txt       |    2 +-
 .../example-DIH/solr/mail/conf/managed-schema   |   45 +-
 .../example-DIH/solr/mail/conf/synonyms.txt     |    2 +-
 .../example-DIH/solr/rss/conf/admin-extra.html  |   24 -
 .../solr/rss/conf/admin-extra.menu-bottom.html  |   25 -
 .../solr/rss/conf/admin-extra.menu-top.html     |   25 -
 .../clustering/carrot2/kmeans-attributes.xml    |   19 -
 .../clustering/carrot2/lingo-attributes.xml     |   24 -
 .../conf/clustering/carrot2/stc-attributes.xml  |   19 -
 .../example-DIH/solr/rss/conf/currency.xml      |   67 -
 .../example-DIH/solr/rss/conf/elevate.xml       |   42 -
 .../solr/rss/conf/lang/contractions_ca.txt      |    8 -
 .../solr/rss/conf/lang/contractions_fr.txt      |   15 -
 .../solr/rss/conf/lang/contractions_ga.txt      |    5 -
 .../solr/rss/conf/lang/contractions_it.txt      |   23 -
 .../solr/rss/conf/lang/hyphenations_ga.txt      |    5 -
 .../solr/rss/conf/lang/stemdict_nl.txt          |    6 -
 .../solr/rss/conf/lang/stoptags_ja.txt          |  420 --
 .../solr/rss/conf/lang/stopwords_ar.txt         |  125 -
 .../solr/rss/conf/lang/stopwords_bg.txt         |  193 -
 .../solr/rss/conf/lang/stopwords_ca.txt         |  220 -
 .../solr/rss/conf/lang/stopwords_ckb.txt        |  136 -
 .../solr/rss/conf/lang/stopwords_cz.txt         |  172 -
 .../solr/rss/conf/lang/stopwords_da.txt         |  110 -
 .../solr/rss/conf/lang/stopwords_de.txt         |  294 --
 .../solr/rss/conf/lang/stopwords_el.txt         |   78 -
 .../solr/rss/conf/lang/stopwords_en.txt         |   54 -
 .../solr/rss/conf/lang/stopwords_es.txt         |  356 --
 .../solr/rss/conf/lang/stopwords_eu.txt         |   99 -
 .../solr/rss/conf/lang/stopwords_fa.txt         |  313 --
 .../solr/rss/conf/lang/stopwords_fi.txt         |   97 -
 .../solr/rss/conf/lang/stopwords_fr.txt         |  186 -
 .../solr/rss/conf/lang/stopwords_ga.txt         |  110 -
 .../solr/rss/conf/lang/stopwords_gl.txt         |  161 -
 .../solr/rss/conf/lang/stopwords_hi.txt         |  235 --
 .../solr/rss/conf/lang/stopwords_hu.txt         |  211 -
 .../solr/rss/conf/lang/stopwords_hy.txt         |   46 -
 .../solr/rss/conf/lang/stopwords_id.txt         |  359 --
 .../solr/rss/conf/lang/stopwords_it.txt         |  303 --
 .../solr/rss/conf/lang/stopwords_ja.txt         |  127 -
 .../solr/rss/conf/lang/stopwords_lv.txt         |  172 -
 .../solr/rss/conf/lang/stopwords_nl.txt         |  119 -
 .../solr/rss/conf/lang/stopwords_no.txt         |  194 -
 .../solr/rss/conf/lang/stopwords_pt.txt         |  253 --
 .../solr/rss/conf/lang/stopwords_ro.txt         |  233 --
 .../solr/rss/conf/lang/stopwords_ru.txt         |  243 --
 .../solr/rss/conf/lang/stopwords_sv.txt         |  133 -
 .../solr/rss/conf/lang/stopwords_th.txt         |  119 -
 .../solr/rss/conf/lang/stopwords_tr.txt         |  212 -
 .../solr/rss/conf/lang/userdict_ja.txt          |   29 -
 .../example-DIH/solr/rss/conf/managed-schema    | 1079 -----
 .../solr/rss/conf/mapping-FoldToASCII.txt       | 3813 ------------------
 .../solr/rss/conf/mapping-ISOLatin1Accent.txt   |  246 --
 .../example-DIH/solr/rss/conf/protwords.txt     |   21 -
 .../solr/rss/conf/rss-data-config.xml           |   26 -
 .../example-DIH/solr/rss/conf/solrconfig.xml    | 1396 -------
 .../example-DIH/solr/rss/conf/spellings.txt     |    2 -
 .../example-DIH/solr/rss/conf/stopwords.txt     |   14 -
 .../example-DIH/solr/rss/conf/synonyms.txt      |   29 -
 .../example-DIH/solr/rss/conf/update-script.js  |   53 -
 .../example-DIH/solr/rss/conf/xslt/example.xsl  |  132 -
 .../solr/rss/conf/xslt/example_atom.xsl         |   67 -
 .../solr/rss/conf/xslt/example_rss.xsl          |   66 -
 .../example-DIH/solr/rss/conf/xslt/luke.xsl     |  337 --
 .../solr/rss/conf/xslt/updateXml.xsl            |   70 -
 .../example-DIH/solr/rss/core.properties        |    0
 .../example-DIH/solr/solr/conf/managed-schema   |   44 +-
 .../example-DIH/solr/solr/conf/synonyms.txt     |    2 +-
 .../example-DIH/solr/tika/conf/admin-extra.html |   24 -
 .../solr/tika/conf/admin-extra.menu-bottom.html |   25 -
 .../solr/tika/conf/admin-extra.menu-top.html    |   25 -
 .../example-DIH/solr/tika/conf/managed-schema   |  899 +----
 .../example-DIH/solr/tika/conf/solrconfig.xml   | 1354 +------
 .../solr/tika/conf/tika-data-config.xml         |   33 +-
 solr/example/files/conf/managed-schema          |   30 +-
 solr/example/files/conf/synonyms.txt            |    2 +-
 solr/licenses/Saxon-HE-9.6.0-2.jar.sha1         |    1 -
 solr/licenses/Saxon-HE-LICENSE-MPL.txt          |  108 -
 solr/licenses/aopalliance-1.0.jar.sha1          |    1 -
 solr/licenses/aopalliance-LICENSE-PD.txt        |    1 -
 solr/licenses/argparse4j-0.4.3.jar.sha1         |    1 -
 solr/licenses/argparse4j-LICENSE-MIT.txt        |   23 -
 solr/licenses/avro-1.7.5.jar.sha1               |    1 -
 solr/licenses/avro-LICENSE-ASL.txt              |  308 --
 solr/licenses/avro-NOTICE.txt                   |    9 -
 solr/licenses/bcpkix-jdk15on-1.47.jar.sha1      |    1 -
 .../bcpkix-jdk15on-LICENSE-BSD_LIKE.txt         |   15 -
 solr/licenses/bcpkix-jdk15on-NOTICE.txt         |    2 -
 solr/licenses/config-1.0.2.jar.sha1             |    1 -
 solr/licenses/config-LICENSE-ASL.txt            |  202 -
 solr/licenses/config-NOTICE.txt                 |    0
 solr/licenses/guice-3.0.jar.sha1                |    1 -
 solr/licenses/guice-LICENSE-ASL.txt             |  202 -
 solr/licenses/guice-NOTICE.txt                  |    0
 solr/licenses/guice-servlet-3.0.jar.sha1        |    1 -
 solr/licenses/guice-servlet-LICENSE-ASL.txt     |  202 -
 solr/licenses/guice-servlet-NOTICE.txt          |    0
 .../hadoop-mapreduce-client-app-2.7.2.jar.sha1  |    1 -
 .../hadoop-mapreduce-client-app-LICENSE-ASL.txt |  244 --
 .../hadoop-mapreduce-client-app-NOTICE.txt      |    2 -
 ...adoop-mapreduce-client-common-2.7.2.jar.sha1 |    1 -
 ...doop-mapreduce-client-common-LICENSE-ASL.txt |  244 --
 .../hadoop-mapreduce-client-common-NOTICE.txt   |    2 -
 .../hadoop-mapreduce-client-core-2.7.2.jar.sha1 |    1 -
 ...hadoop-mapreduce-client-core-LICENSE-ASL.txt |  244 --
 .../hadoop-mapreduce-client-core-NOTICE.txt     |    2 -
 .../hadoop-mapreduce-client-hs-2.7.2.jar.sha1   |    1 -
 .../hadoop-mapreduce-client-hs-LICENSE-ASL.txt  |  244 --
 .../hadoop-mapreduce-client-hs-NOTICE.txt       |    2 -
 ...reduce-client-jobclient-2.7.2-tests.jar.sha1 |    1 -
 ...op-mapreduce-client-jobclient-2.7.2.jar.sha1 |    1 -
 ...p-mapreduce-client-jobclient-LICENSE-ASL.txt |  244 --
 ...hadoop-mapreduce-client-jobclient-NOTICE.txt |    2 -
 ...doop-mapreduce-client-shuffle-2.7.2.jar.sha1 |    1 -
 ...oop-mapreduce-client-shuffle-LICENSE-ASL.txt |  244 --
 .../hadoop-mapreduce-client-shuffle-NOTICE.txt  |    2 -
 solr/licenses/hadoop-yarn-api-2.7.2.jar.sha1    |    1 -
 solr/licenses/hadoop-yarn-api-LICENSE-ASL.txt   |  244 --
 solr/licenses/hadoop-yarn-api-NOTICE.txt        |    2 -
 solr/licenses/hadoop-yarn-client-2.7.2.jar.sha1 |    1 -
 .../licenses/hadoop-yarn-client-LICENSE-ASL.txt |  244 --
 solr/licenses/hadoop-yarn-client-NOTICE.txt     |    2 -
 solr/licenses/hadoop-yarn-common-2.7.2.jar.sha1 |    1 -
 .../licenses/hadoop-yarn-common-LICENSE-ASL.txt |  244 --
 solr/licenses/hadoop-yarn-common-NOTICE.txt     |    2 -
 ...ver-applicationhistoryservice-2.7.2.jar.sha1 |    1 -
 ...er-applicationhistoryservice-LICENSE-ASL.txt |  244 --
 ...-server-applicationhistoryservice-NOTICE.txt |    2 -
 .../hadoop-yarn-server-common-2.7.2.jar.sha1    |    1 -
 .../hadoop-yarn-server-common-LICENSE-ASL.txt   |  244 --
 .../hadoop-yarn-server-common-NOTICE.txt        |    2 -
 ...adoop-yarn-server-nodemanager-2.7.2.jar.sha1 |    1 -
 ...doop-yarn-server-nodemanager-LICENSE-ASL.txt |  244 --
 .../hadoop-yarn-server-nodemanager-NOTICE.txt   |    2 -
 ...p-yarn-server-resourcemanager-2.7.2.jar.sha1 |    1 -
 ...-yarn-server-resourcemanager-LICENSE-ASL.txt |  244 --
 ...adoop-yarn-server-resourcemanager-NOTICE.txt |    2 -
 ...adoop-yarn-server-tests-2.7.2-tests.jar.sha1 |    1 -
 .../hadoop-yarn-server-tests-LICENSE-ASL.txt    |  244 --
 .../hadoop-yarn-server-tests-NOTICE.txt         |    2 -
 .../hadoop-yarn-server-web-proxy-2.7.2.jar.sha1 |    1 -
 ...hadoop-yarn-server-web-proxy-LICENSE-ASL.txt |  244 --
 .../hadoop-yarn-server-web-proxy-NOTICE.txt     |    2 -
 solr/licenses/jackson-jaxrs-1.9.13.jar.sha1     |    1 -
 solr/licenses/jackson-jaxrs-LICENSE-ASL.txt     |   13 -
 solr/licenses/jackson-jaxrs-NOTICE.txt          |    7 -
 solr/licenses/javax.inject-1.jar.sha1           |    1 -
 solr/licenses/javax.inject-LICENSE-ASL.txt      |  202 -
 solr/licenses/javax.inject-NOTICE.txt           |    0
 solr/licenses/jaxb-impl-2.2.3-1.jar.sha1        |    1 -
 solr/licenses/jaxb-impl-LICENSE-CDDL.txt        |  135 -
 solr/licenses/jersey-bundle-1.9.jar.sha1        |    1 -
 solr/licenses/jersey-bundle-LICENSE-CDDL.txt    |   85 -
 solr/licenses/jersey-guice-1.9.jar.sha1         |    1 -
 solr/licenses/jersey-guice-LICENSE-CDDL.txt     |   85 -
 solr/licenses/jersey-json-1.9.jar.sha1          |    1 -
 solr/licenses/jersey-json-LICENSE-CDDL.txt      |   85 -
 .../kite-morphlines-avro-1.1.0.jar.sha1         |    1 -
 .../kite-morphlines-avro-LICENSE-ASL.txt        |  202 -
 solr/licenses/kite-morphlines-avro-NOTICE.txt   |    8 -
 .../kite-morphlines-core-1.1.0-tests.jar.sha1   |    1 -
 .../kite-morphlines-core-1.1.0.jar.sha1         |    1 -
 .../kite-morphlines-core-LICENSE-ASL.txt        |  202 -
 solr/licenses/kite-morphlines-core-NOTICE.txt   |    8 -
 ...orphlines-hadoop-sequencefile-1.1.0.jar.sha1 |    1 -
 ...rphlines-hadoop-sequencefile-LICENSE-ASL.txt |  202 -
 ...te-morphlines-hadoop-sequencefile-NOTICE.txt |    8 -
 .../kite-morphlines-json-1.1.0.jar.sha1         |    1 -
 .../kite-morphlines-json-LICENSE-ASL.txt        |  202 -
 solr/licenses/kite-morphlines-json-NOTICE.txt   |    8 -
 .../kite-morphlines-saxon-1.1.0.jar.sha1        |    1 -
 .../kite-morphlines-saxon-LICENSE-ASL.txt       |  202 -
 solr/licenses/kite-morphlines-saxon-NOTICE.txt  |    8 -
 .../kite-morphlines-tika-core-1.1.0.jar.sha1    |    1 -
 .../kite-morphlines-tika-core-LICENSE-ASL.txt   |  202 -
 .../kite-morphlines-tika-core-NOTICE.txt        |    8 -
 ...te-morphlines-tika-decompress-1.1.0.jar.sha1 |    1 -
 ...e-morphlines-tika-decompress-LICENSE-ASL.txt |  202 -
 .../kite-morphlines-tika-decompress-NOTICE.txt  |    8 -
 .../kite-morphlines-twitter-1.1.0.jar.sha1      |    1 -
 .../kite-morphlines-twitter-LICENSE-ASL.txt     |  202 -
 .../licenses/kite-morphlines-twitter-NOTICE.txt |    8 -
 solr/licenses/leveldb-0.7.jar.sha1              |    1 -
 solr/licenses/leveldb-LICENSE-BSD.txt           |   27 -
 solr/licenses/leveldb-NOTICE.txt                |    0
 solr/licenses/leveldb-api-0.7.jar.sha1          |    1 -
 solr/licenses/leveldb-api-LICENSE-BSD.txt       |   27 -
 solr/licenses/leveldb-api-NOTICE.txt            |    0
 solr/licenses/leveldbjni-1.8.jar.sha1           |    1 -
 solr/licenses/leveldbjni-LICENSE-BSD.txt        |   27 -
 solr/licenses/leveldbjni-NOTICE.txt             |    0
 .../metrics-healthchecks-3.1.2.jar.sha1         |    1 -
 .../metrics-healthchecks-LICENSE-ASL.txt        |  202 -
 solr/licenses/metrics-healthchecks-NOTICE.txt   |   11 -
 solr/licenses/mrunit-1.0.0-hadoop2.jar.sha1     |    1 -
 solr/licenses/mrunit-LICENSE-ASL.txt            |  479 ---
 solr/licenses/mrunit-NOTICE.txt                 |    5 -
 solr/licenses/netty-3.2.4.Final.jar.sha1        |    1 -
 solr/licenses/netty-LICENSE-ASL.txt             |  202 -
 solr/licenses/netty-NOTICE.txt                  |   98 -
 solr/licenses/paranamer-2.3.jar.sha1            |    1 -
 solr/licenses/paranamer-LICENSE-BSD.txt         |   28 -
 solr/licenses/paranamer-NOTICE.txt              |    0
 solr/licenses/rome-1.6.1.jar.sha1               |    1 -
 solr/licenses/snappy-java-1.0.5.jar.sha1        |    1 -
 solr/licenses/snappy-java-LICENSE-ASL.txt       |  201 -
 solr/licenses/snappy-java-NOTICE.txt            |    0
 .../map-reduce/set-map-reduce-classpath.sh      |   52 -
 .../basic_configs/conf/managed-schema           |   44 +-
 .../configsets/basic_configs/conf/synonyms.txt  |    2 +-
 .../conf/managed-schema                         |   44 +-
 .../conf/synonyms.txt                           |    2 +-
 .../conf/managed-schema                         |   44 +-
 .../conf/synonyms.txt                           |    2 +-
 .../solr/client/solrj/impl/CloudSolrClient.java |   15 +-
 .../solrj/impl/ConcurrentUpdateSolrClient.java  |    2 +-
 .../apache/solr/client/solrj/io/ModelCache.java |    4 +-
 .../solrj/io/eval/ArcCosineEvaluator.java       |   60 +
 .../client/solrj/io/eval/ArcSineEvaluator.java  |   60 +
 .../solrj/io/eval/ArcTangentEvaluator.java      |   60 +
 .../client/solrj/io/eval/BooleanEvaluator.java  |    2 +-
 .../client/solrj/io/eval/CeilingEvaluator.java  |   61 +
 .../client/solrj/io/eval/CoalesceEvaluator.java |   52 +
 .../client/solrj/io/eval/ComplexEvaluator.java  |    6 +
 .../solrj/io/eval/ConditionalEvaluator.java     |    2 +-
 .../client/solrj/io/eval/CosineEvaluator.java   |   60 +
 .../solrj/io/eval/CubedRootEvaluator.java       |   60 +
 .../client/solrj/io/eval/FloorEvaluator.java    |   61 +
 .../io/eval/HyperbolicCosineEvaluator.java      |   60 +
 .../solrj/io/eval/HyperbolicSineEvaluator.java  |   60 +
 .../io/eval/HyperbolicTangentEvaluator.java     |   60 +
 .../client/solrj/io/eval/ModuloEvaluator.java   |   78 +
 .../client/solrj/io/eval/PowerEvaluator.java    |   61 +
 .../client/solrj/io/eval/RoundEvaluator.java    |   60 +
 .../client/solrj/io/eval/SimpleEvaluator.java   |    7 +
 .../client/solrj/io/eval/SineEvaluator.java     |   60 +
 .../solrj/io/eval/SquareRootEvaluator.java      |   60 +
 .../client/solrj/io/eval/StreamEvaluator.java   |    2 +
 .../client/solrj/io/eval/TangentEvaluator.java  |   60 +
 .../client/solrj/io/eval/UuidEvaluator.java     |   57 +
 .../solrj/io/graph/GatherNodesStream.java       |    2 +-
 .../solrj/io/graph/ShortestPathStream.java      |    2 +-
 .../client/solrj/io/ops/GroupOperation.java     |    2 +-
 .../solrj/io/stream/CartesianProductStream.java |    6 +-
 .../client/solrj/io/stream/FacetStream.java     |    8 +-
 .../solr/client/solrj/io/stream/RankStream.java |    2 +-
 .../client/solrj/io/stream/SelectStream.java    |    6 +
 .../solrj/io/stream/SignificantTermsStream.java |    2 +-
 .../client/solrj/io/stream/StatsStream.java     |   14 +-
 .../client/solrj/io/stream/TextLogitStream.java |    2 +-
 .../solrj/io/stream/metrics/MeanMetric.java     |   35 +-
 .../client/solrj/io/stream/metrics/Metric.java  |    1 +
 .../client/solrj/response/QueryResponse.java    |    6 +-
 .../solrj/response/SpellCheckResponse.java      |    4 +-
 .../client/solrj/response/TermsResponse.java    |   37 +-
 .../org/apache/solr/common/cloud/DocRouter.java |    4 +-
 .../solr/common/cloud/ZkMaintenanceUtils.java   |  147 +-
 .../apache/solr/common/cloud/ZkStateReader.java |    4 +-
 .../apache/solr/common/params/CommonParams.java |    4 +-
 .../solr/common/params/ConfigSetParams.java     |    1 +
 .../apache/solr/common/params/TermsParams.java  |   12 +-
 .../solr/common/util/JsonRecordReader.java      |    2 +-
 .../org/apache/solr/common/util/StrUtils.java   |    2 +-
 .../solrj/solr/collection1/conf/schema-sql.xml  |   59 +-
 .../solrj/solr/collection1/conf/schema.xml      |   59 +-
 .../solr/configsets/streaming/conf/schema.xml   |   56 +-
 .../solr/client/solrj/TestLBHttpSolrClient.java |    2 +-
 .../embedded/SolrExampleStreamingTest.java      |    4 +-
 .../impl/ConcurrentUpdateSolrClientTest.java    |    6 +-
 .../solrj/io/stream/StreamExpressionTest.java   |  132 +-
 .../stream/StreamExpressionToExpessionTest.java |    8 +-
 .../io/stream/eval/ArcCosineEvaluatorTest.java  |   91 +
 .../io/stream/eval/ArcSineEvaluatorTest.java    |   91 +
 .../io/stream/eval/ArcTangentEvaluatorTest.java |   91 +
 .../io/stream/eval/CeilingEvaluatorTest.java    |   96 +
 .../io/stream/eval/CoalesceEvaluatorTest.java   |  112 +
 .../io/stream/eval/CosineEvaluatorTest.java     |   91 +
 .../io/stream/eval/CubedRootEvaluatorTest.java  |   91 +
 .../io/stream/eval/FloorEvaluatorTest.java      |   96 +
 .../eval/HyperbolicCosineEvaluatorTest.java     |   91 +
 .../eval/HyperbolicSineEvaluatorTest.java       |   91 +
 .../eval/HyperbolicTangentEvaluatorTest.java    |   91 +
 .../io/stream/eval/ModuloEvaluatorTest.java     |  164 +
 .../io/stream/eval/PowerEvaluatorTest.java      |  119 +
 .../io/stream/eval/RoundEvaluatorTest.java      |   95 +
 .../solrj/io/stream/eval/SineEvaluatorTest.java |   91 +
 .../io/stream/eval/SquareRootEvaluatorTest.java |   91 +
 .../io/stream/eval/TangentEvaluatorTest.java    |   91 +
 .../solrj/io/stream/eval/UuidEvaluatorTest.java |   52 +
 .../solr/client/solrj/request/SchemaTest.java   |    2 +-
 .../solr/common/util/TestJavaBinCodec.java      |    4 +-
 .../java/org/apache/solr/SolrTestCaseJ4.java    |   11 +-
 .../apache/solr/cloud/MiniSolrCloudCluster.java |    6 +-
 solr/webapp/web/css/angular/cloud.css           |   24 +
 solr/webapp/web/js/angular/controllers/cloud.js |   19 +-
 solr/webapp/web/partials/cloud.html             |    1 +
 972 files changed, 8827 insertions(+), 77971 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/04a71229/solr/core/src/java/org/apache/solr/core/CoreContainer.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/04a71229/solr/core/src/java/org/apache/solr/core/SolrCore.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/04a71229/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
----------------------------------------------------------------------
diff --cc solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
index 36f4254,98bf11a..062f532
--- a/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
@@@ -90,8 -90,7 +90,9 @@@ import org.apache.solr.core.SolrEventLi
  import org.apache.solr.core.backup.repository.BackupRepository;
  import org.apache.solr.core.backup.repository.LocalFileSystemRepository;
  import org.apache.solr.core.snapshots.SolrSnapshotMetaDataManager;
 +import org.apache.solr.metrics.MetricsMap;
 +import org.apache.solr.metrics.SolrMetricManager;
+ import org.apache.solr.handler.IndexFetcher.IndexFetchResult;
  import org.apache.solr.request.SolrQueryRequest;
  import org.apache.solr.response.SolrQueryResponse;
  import org.apache.solr.search.SolrIndexSearcher;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/04a71229/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
----------------------------------------------------------------------
diff --cc solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
index aa13cbf,656ac71..2519a47
--- a/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
@@@ -748,9 -777,20 +775,9 @@@ public class ExpandComponent extends Se
      return Category.QUERY;
    }
  
 -  @Override
 -  public URL[] getDocs() {
 -    try {
 -      return new URL[]{
 -          new URL("http://wiki.apache.org/solr/ExpandComponent")
 -      };
 -    } catch (MalformedURLException e) {
 -      throw new RuntimeException(e);
 -    }
 -  }
 -
    // this reader alters the content of the given reader so it should not
    // delegate the caching stuff
-   private class ReaderWrapper extends FilterLeafReader {
+   private static class ReaderWrapper extends FilterLeafReader {
  
      private String field;
  

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/04a71229/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/04a71229/solr/core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/04a71229/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/04a71229/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
----------------------------------------------------------------------
diff --cc solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
index 04e970c,4207a9b..9b38225
--- a/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
+++ b/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@@@ -18,18 -18,14 +18,14 @@@ package org.apache.solr.search
  
  import java.io.Closeable;
  import java.io.IOException;
- import java.io.Reader;
  import java.lang.invoke.MethodHandles;
- import java.nio.charset.StandardCharsets;
 -import java.net.URL;
  import java.util.ArrayList;
  import java.util.Arrays;
- import java.util.Collection;
  import java.util.Collections;
  import java.util.Comparator;
  import java.util.Date;
  import java.util.HashMap;
 +import java.util.HashSet;
- import java.util.LinkedList;
  import java.util.List;
  import java.util.Map;
  import java.util.Objects;
@@@ -38,15 -34,21 +34,22 @@@ import java.util.concurrent.TimeUnit
  import java.util.concurrent.atomic.AtomicLong;
  import java.util.concurrent.atomic.AtomicReference;
  
 +import com.codahale.metrics.MetricRegistry;
  import com.google.common.collect.Iterables;
- import org.apache.lucene.analysis.Analyzer;
- import org.apache.lucene.analysis.TokenStream;
  import org.apache.lucene.document.Document;
- import org.apache.lucene.document.DocumentStoredFieldVisitor;
- import org.apache.lucene.document.LazyDocument;
- import org.apache.lucene.index.*;
- import org.apache.lucene.index.StoredFieldVisitor.Status;
+ import org.apache.lucene.index.DirectoryReader;
+ import org.apache.lucene.index.ExitableDirectoryReader;
+ import org.apache.lucene.index.FieldInfos;
+ import org.apache.lucene.index.IndexReader;
+ import org.apache.lucene.index.LeafReader;
+ import org.apache.lucene.index.LeafReaderContext;
+ import org.apache.lucene.index.MultiPostingsEnum;
+ import org.apache.lucene.index.PostingsEnum;
+ import org.apache.lucene.index.StoredFieldVisitor;
+ import org.apache.lucene.index.Term;
+ import org.apache.lucene.index.TermContext;
+ import org.apache.lucene.index.Terms;
+ import org.apache.lucene.index.TermsEnum;
  import org.apache.lucene.search.*;
  import org.apache.lucene.search.BooleanClause.Occur;
  import org.apache.lucene.store.Directory;
@@@ -160,10 -136,10 +137,10 @@@ public class SolrIndexSearcher extends 
    private final String path;
    private boolean releaseDirectory;
  
 -  private final NamedList<Object> readerStats;
 +  private Set<String> metricNames = new HashSet<>();
  
    private static DirectoryReader getReader(SolrCore core, SolrIndexConfig config, DirectoryFactory directoryFactory,
-       String path) throws IOException {
+                                            String path) throws IOException {
      final Directory dir = directoryFactory.get(path, DirContext.DEFAULT, config.lockType);
      try {
        return core.getIndexReaderFactory().newReader(dir, core);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/04a71229/solr/core/src/java/org/apache/solr/update/UpdateHandler.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/04a71229/solr/core/src/java/org/apache/solr/update/UpdateLog.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/04a71229/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java
----------------------------------------------------------------------
diff --cc solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java
index 6fc6e2d,607f091..95d3da7
--- a/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java
+++ b/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java
@@@ -34,7 -34,9 +34,9 @@@ import org.apache.lucene.search.TermInS
  import org.apache.lucene.search.TermQuery;
  import org.apache.solr.SolrTestCaseJ4;
  import org.apache.solr.common.params.MapSolrParams;
 +import org.apache.solr.metrics.MetricsMap;
+ import org.apache.solr.common.params.ModifiableSolrParams;
+ import org.apache.solr.common.params.SolrParams;
 -import org.apache.solr.core.SolrInfoMBean;
  import org.apache.solr.parser.QueryParser;
  import org.apache.solr.query.FilterQuery;
  import org.apache.solr.request.SolrQueryRequest;


[26/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-10351: Add analyze Stream Evaluator to support streaming NLP

Posted by ab...@apache.org.
SOLR-10351: Add analyze Stream Evaluator to support streaming NLP


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/6c2155c0
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/6c2155c0
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/6c2155c0

Branch: refs/heads/jira/solr-9959
Commit: 6c2155c02434bfae2ff5aa62c9ffe57318063626
Parents: edafcba
Author: Joel Bernstein <jb...@apache.org>
Authored: Thu Mar 30 17:34:28 2017 +0100
Committer: Joel Bernstein <jb...@apache.org>
Committed: Thu Mar 30 17:53:07 2017 +0100

----------------------------------------------------------------------
 .../apache/solr/handler/AnalyzeEvaluator.java   | 111 ++++++++++++++++
 .../org/apache/solr/handler/StreamHandler.java  |   1 +
 .../client/solrj/io/eval/BooleanEvaluator.java  |   9 +-
 .../solrj/io/eval/ConditionalEvaluator.java     |   6 +
 .../client/solrj/io/eval/NumberEvaluator.java   |   6 +
 .../client/solrj/io/eval/SimpleEvaluator.java   |   7 +
 .../client/solrj/io/eval/StreamEvaluator.java   |   2 +
 .../solrj/io/stream/CartesianProductStream.java |   6 +-
 .../client/solrj/io/stream/SelectStream.java    |   6 +
 .../solrj/io/stream/StreamExpressionTest.java   | 133 ++++++++++++++++++-
 10 files changed, 278 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c2155c0/solr/core/src/java/org/apache/solr/handler/AnalyzeEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/AnalyzeEvaluator.java b/solr/core/src/java/org/apache/solr/handler/AnalyzeEvaluator.java
new file mode 100644
index 0000000..207f404
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/handler/AnalyzeEvaluator.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ *
+ */
+package org.apache.solr.handler;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.stream.StreamContext;
+import org.apache.solr.client.solrj.io.stream.expr.Explanation;
+import org.apache.solr.client.solrj.io.stream.expr.Explanation.ExpressionType;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionParameter;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionValue;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+import org.apache.solr.client.solrj.io.eval.*;
+
+import org.apache.solr.common.SolrException;
+import org.apache.lucene.analysis.*;
+import org.apache.solr.core.SolrCore;
+
+public class AnalyzeEvaluator extends SimpleEvaluator {
+  private static final long serialVersionUID = 1L;
+
+  private String fieldName;
+  private String analyzerField;
+  private Analyzer analyzer;
+
+  public AnalyzeEvaluator(String _fieldName, String _analyzerField) {
+    init(_fieldName, _analyzerField);
+  }
+
+  public AnalyzeEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
+    String _fieldName = factory.getValueOperand(expression, 0);
+    String _analyzerField = factory.getValueOperand(expression, 1);
+    init(_fieldName, _analyzerField);
+  }
+
+  public void setStreamContext(StreamContext context) {
+    Object solrCoreObj = context.get("solr-core");
+    if (solrCoreObj == null || !(solrCoreObj instanceof SolrCore) ) {
+      throw new SolrException(SolrException.ErrorCode.INVALID_STATE, "StreamContext must have SolrCore in solr-core key");
+    }
+    SolrCore solrCore = (SolrCore) solrCoreObj;
+
+    analyzer = solrCore.getLatestSchema().getFieldType(analyzerField).getIndexAnalyzer();
+  }
+
+  private void init(String fieldName, String analyzerField) {
+    this.fieldName = fieldName;
+    if(analyzerField == null) {
+      this.analyzerField = fieldName;
+    } else {
+      this.analyzerField = analyzerField;
+    }
+  }
+
+  @Override
+  public Object evaluate(Tuple tuple) throws IOException {
+    String value = tuple.getString(fieldName);
+    if(value == null) {
+      return null;
+    }
+
+    TokenStream tokenStream = analyzer.tokenStream(analyzerField, value);
+    CharTermAttribute termAtt = tokenStream.getAttribute(CharTermAttribute.class);
+    tokenStream.reset();
+    List<String> tokens = new ArrayList();
+    while (tokenStream.incrementToken()) {
+      tokens.add(termAtt.toString());
+    }
+
+    tokenStream.end();
+    tokenStream.close();
+
+    return tokens;
+  }
+
+  @Override
+  public StreamExpressionParameter toExpression(StreamFactory factory) throws IOException {
+    return new StreamExpressionValue(fieldName);
+  }
+
+  @Override
+  public Explanation toExplanation(StreamFactory factory) throws IOException {
+    return new Explanation(nodeId.toString())
+        .withExpressionType(ExpressionType.EVALUATOR)
+        .withImplementingClass(getClass().getName())
+        .withExpression(toExpression(factory).toString());
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c2155c0/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/StreamHandler.java b/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
index 5d10664..3ede732 100644
--- a/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
@@ -209,6 +209,7 @@ public class StreamHandler extends RequestHandlerBase implements SolrCoreAware,
       .withFunctionName("log", NaturalLogEvaluator.class)
       // Conditional Stream Evaluators
       .withFunctionName("if", IfThenElseEvaluator.class)
+      .withFunctionName("analyze", AnalyzeEvaluator.class)
       ;
 
      // This pulls all the overrides and additions from the config

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c2155c0/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/BooleanEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/BooleanEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/BooleanEvaluator.java
index bf21f1d..f02f1fa 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/BooleanEvaluator.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/BooleanEvaluator.java
@@ -24,11 +24,13 @@ import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.stream.StreamContext;
 import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
 import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
 
 public abstract class BooleanEvaluator extends ComplexEvaluator {
   protected static final long serialVersionUID = 1L;
+  protected StreamContext streamContext;
   
   public BooleanEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
     super(expression, factory);
@@ -45,7 +47,12 @@ public abstract class BooleanEvaluator extends ComplexEvaluator {
     
     return results;
   }
-  
+
+  public void setStreamContext(StreamContext streamContext) {
+    this.streamContext = streamContext;
+  }
+
+
   public interface Checker {
     default boolean isNullAllowed(){
       return false;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c2155c0/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ConditionalEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ConditionalEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ConditionalEvaluator.java
index 499e2f8..6126544 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ConditionalEvaluator.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/ConditionalEvaluator.java
@@ -24,11 +24,13 @@ import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.stream.StreamContext;
 import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
 import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
 
 public abstract class ConditionalEvaluator extends ComplexEvaluator {
   protected static final long serialVersionUID = 1L;
+  protected StreamContext streamContext;
   
   public ConditionalEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
     super(expression, factory);
@@ -42,6 +44,10 @@ public abstract class ConditionalEvaluator extends ComplexEvaluator {
     
     return results;
   }
+
+  public void setStreamContext(StreamContext streamContext) {
+    this.streamContext = streamContext;
+  }
   
   public interface Checker {
     default boolean isNullAllowed(){

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c2155c0/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/NumberEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/NumberEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/NumberEvaluator.java
index f4491fd..283c7b1 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/NumberEvaluator.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/NumberEvaluator.java
@@ -26,11 +26,13 @@ import java.util.List;
 import java.util.Locale;
 
 import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.stream.StreamContext;
 import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
 import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
 
 public abstract class NumberEvaluator extends ComplexEvaluator {
   protected static final long serialVersionUID = 1L;
+  protected StreamContext streamContext;
   
   public NumberEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
     super(expression, factory);
@@ -38,6 +40,10 @@ public abstract class NumberEvaluator extends ComplexEvaluator {
   
   // restrict result to a Number
   public abstract Number evaluate(Tuple tuple) throws IOException;
+
+  public void setStreamContext(StreamContext context) {
+    this.streamContext = context;
+  }
   
   public List<BigDecimal> evaluateAll(final Tuple tuple) throws IOException {
     // evaluate each and confirm they are all either null or numeric

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c2155c0/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/SimpleEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/SimpleEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/SimpleEvaluator.java
index 79d1799..5ee1715 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/SimpleEvaluator.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/SimpleEvaluator.java
@@ -21,9 +21,16 @@ package org.apache.solr.client.solrj.io.eval;
 
 import java.util.UUID;
 
+import org.apache.solr.client.solrj.io.stream.StreamContext;
+
 public abstract class SimpleEvaluator implements StreamEvaluator {
   private static final long serialVersionUID = 1L;
   
   protected UUID nodeId = UUID.randomUUID();
+  protected StreamContext streamContext;
+
+  public void setStreamContext(StreamContext streamContext) {
+    this.streamContext = streamContext;
+  }
 
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c2155c0/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/StreamEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/StreamEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/StreamEvaluator.java
index 6bc4d50..1774c46 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/StreamEvaluator.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/StreamEvaluator.java
@@ -23,8 +23,10 @@ import java.io.IOException;
 import java.io.Serializable;
 
 import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.stream.StreamContext;
 import org.apache.solr.client.solrj.io.stream.expr.Expressible;
 
 public interface StreamEvaluator extends Expressible, Serializable {
   Object evaluate(final Tuple tuple) throws IOException;
+  void setStreamContext(StreamContext streamContext);
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c2155c0/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/CartesianProductStream.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/CartesianProductStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/CartesianProductStream.java
index feb10c7..6514ae4 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/CartesianProductStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/CartesianProductStream.java
@@ -49,7 +49,7 @@ public class CartesianProductStream extends TupleStream implements Expressible {
   private List<NamedEvaluator> evaluators;
   private StreamComparator orderBy;
   
-  // Used to contain the sorted queue of generated tuples 
+  // Used to contain the sorted queue of generated tuples
   private LinkedList<Tuple> generatedTuples;
   
   public CartesianProductStream(StreamExpression expression,StreamFactory factory) throws IOException {
@@ -59,7 +59,6 @@ public class CartesianProductStream extends TupleStream implements Expressible {
     List<StreamExpression> streamExpressions = factory.getExpressionOperandsRepresentingTypes(expression, Expressible.class, TupleStream.class);
     List<StreamExpressionParameter> evaluateAsExpressions = factory.getOperandsOfType(expression, StreamExpressionValue.class);
     StreamExpressionNamedParameter orderByExpression = factory.getNamedOperand(expression, "productSort");
-    
     // validate expression contains only what we want.
     if(expression.getParameters().size() != streamExpressions.size() + evaluateAsExpressions.size() + (null == orderByExpression ? 0 : 1)){
       throw new IOException(String.format(Locale.ROOT,"Invalid %s expression %s - unknown operands found", functionName, expression));
@@ -259,6 +258,9 @@ public class CartesianProductStream extends TupleStream implements Expressible {
   
   public void setStreamContext(StreamContext context) {
     this.stream.setStreamContext(context);
+    for(NamedEvaluator evaluator : evaluators) {
+      evaluator.getEvaluator().setStreamContext(context);
+    }
   }
 
   public List<TupleStream> children() {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c2155c0/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/SelectStream.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/SelectStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/SelectStream.java
index eed8182..c0cbc17 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/SelectStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/SelectStream.java
@@ -22,6 +22,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.solr.client.solrj.io.Tuple;
 import org.apache.solr.client.solrj.io.comp.StreamComparator;
@@ -213,6 +214,11 @@ public class SelectStream extends TupleStream implements Expressible {
 
   public void setStreamContext(StreamContext context) {
     this.stream.setStreamContext(context);
+    Set<StreamEvaluator> evaluators = selectedEvaluators.keySet();
+
+    for(StreamEvaluator evaluator : evaluators) {
+      evaluator.setStreamContext(context);
+    }
   }
 
   public List<TupleStream> children() {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/6c2155c0/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
index 6c96025..18ddb93 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionTest.java
@@ -61,6 +61,7 @@ import org.apache.solr.cloud.AbstractDistribZkTestBase;
 import org.apache.solr.cloud.SolrCloudTestCase;
 import org.apache.solr.common.params.CommonParams;
 import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.handler.AnalyzeEvaluator;
 import org.junit.Assume;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -379,7 +380,7 @@ public class StreamExpressionTest extends SolrCloudTestCase {
     stream = factory.constructStream("sort(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), by=\"a_i asc\")");
     tuples = getTuples(stream);
     assert(tuples.size() == 6);
-    assertOrder(tuples, 0,1,5,2,3,4);
+    assertOrder(tuples, 0, 1, 5, 2, 3, 4);
 
     // Basic test desc
     stream = factory.constructStream("sort(search(" + COLLECTIONORALIAS + ", q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc\"), by=\"a_i desc\")");
@@ -1908,7 +1909,7 @@ public class StreamExpressionTest extends SolrCloudTestCase {
     stream = new InnerJoinStream(expression, factory);
     tuples = getTuples(stream);    
     assert(tuples.size() == 8);
-    assertOrder(tuples, 1,1,15,15,3,4,5,7);
+    assertOrder(tuples, 1, 1, 15, 15, 3, 4, 5, 7);
 
     // Basic desc
     expression = StreamExpressionParser.parse("innerJoin("
@@ -1922,9 +1923,9 @@ public class StreamExpressionTest extends SolrCloudTestCase {
     
     // Results in both searches, no join matches
     expression = StreamExpressionParser.parse("innerJoin("
-                                                + "search(" + COLLECTIONORALIAS + ", q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"ident_s asc\"),"
-                                                + "search(" + COLLECTIONORALIAS + ", q=\"side_s:right\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"ident_s asc\", aliases=\"id=right.id, join1_i=right.join1_i, join2_s=right.join2_s, ident_s=right.ident_s\"),"
-                                                + "on=\"ident_s=right.ident_s\")");
+        + "search(" + COLLECTIONORALIAS + ", q=\"side_s:left\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"ident_s asc\"),"
+        + "search(" + COLLECTIONORALIAS + ", q=\"side_s:right\", fl=\"id,join1_i,join2_s,ident_s\", sort=\"ident_s asc\", aliases=\"id=right.id, join1_i=right.join1_i, join2_s=right.join2_s, ident_s=right.ident_s\"),"
+        + "on=\"ident_s=right.ident_s\")");
     stream = new InnerJoinStream(expression, factory);
     tuples = getTuples(stream);    
     assert(tuples.size() == 0);
@@ -1938,7 +1939,7 @@ public class StreamExpressionTest extends SolrCloudTestCase {
     tuples = getTuples(stream);
     
     assert(tuples.size() == 8);
-    assertOrder(tuples, 1,1,15,15,3,4,5,7);
+    assertOrder(tuples, 1, 1, 15, 15, 3, 4, 5, 7);
 
   }
 
@@ -4348,6 +4349,126 @@ public class StreamExpressionTest extends SolrCloudTestCase {
   }
 
   @Test
+  public void testAnalyzeEvaluator() throws Exception {
+
+    UpdateRequest updateRequest = new UpdateRequest();
+    updateRequest.add(id, "1", "test_t", "l b c d c");
+    updateRequest.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
+
+
+    SolrClientCache cache = new SolrClientCache();
+    try {
+
+      String expr = "cartesianProduct(search("+COLLECTIONORALIAS+", q=\"*:*\", fl=\"id, test_t\", sort=\"id desc\"), analyze(test_t, test_t) as test_t)";
+      ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
+      paramsLoc.set("expr", expr);
+      paramsLoc.set("qt", "/stream");
+      String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
+
+      SolrStream solrStream = new SolrStream(url, paramsLoc);
+
+      StreamContext context = new StreamContext();
+      solrStream.setStreamContext(context);
+      List<Tuple> tuples = getTuples(solrStream);
+      assertTrue(tuples.size() == 5);
+
+      Tuple t = tuples.get(0);
+      assertTrue(t.getString("test_t").equals("l"));
+      assertTrue(t.getString("id").equals("1"));
+
+      t = tuples.get(1);
+      assertTrue(t.getString("test_t").equals("b"));
+      assertTrue(t.getString("id").equals("1"));
+
+
+      t = tuples.get(2);
+      assertTrue(t.getString("test_t").equals("c"));
+      assertTrue(t.getString("id").equals("1"));
+
+
+      t = tuples.get(3);
+      assertTrue(t.getString("test_t").equals("d"));
+      assertTrue(t.getString("id").equals("1"));
+
+      t = tuples.get(4);
+      assertTrue(t.getString("test_t").equals("c"));
+      assertTrue(t.getString("id").equals("1"));
+
+
+      //Try with single param
+      expr = "cartesianProduct(search("+COLLECTIONORALIAS+", q=\"*:*\", fl=\"id, test_t\", sort=\"id desc\"), analyze(test_t) as test_t)";
+      paramsLoc = new ModifiableSolrParams();
+      paramsLoc.set("expr", expr);
+      paramsLoc.set("qt", "/stream");
+
+      solrStream = new SolrStream(url, paramsLoc);
+
+      context = new StreamContext();
+      solrStream.setStreamContext(context);
+      tuples = getTuples(solrStream);
+      assertTrue(tuples.size() == 5);
+
+      t = tuples.get(0);
+      assertTrue(t.getString("test_t").equals("l"));
+      assertTrue(t.getString("id").equals("1"));
+
+      t = tuples.get(1);
+      assertTrue(t.getString("test_t").equals("b"));
+      assertTrue(t.getString("id").equals("1"));
+
+
+      t = tuples.get(2);
+      assertTrue(t.getString("test_t").equals("c"));
+      assertTrue(t.getString("id").equals("1"));
+
+
+      t = tuples.get(3);
+      assertTrue(t.getString("test_t").equals("d"));
+      assertTrue(t.getString("id").equals("1"));
+
+      t = tuples.get(4);
+      assertTrue(t.getString("test_t").equals("c"));
+      assertTrue(t.getString("id").equals("1"));
+
+
+      //Try with null in the test_t field
+      expr = "cartesianProduct(search("+COLLECTIONORALIAS+", q=\"*:*\", fl=\"id\", sort=\"id desc\"), analyze(test_t, test_t) as test_t)";
+      paramsLoc = new ModifiableSolrParams();
+      paramsLoc.set("expr", expr);
+      paramsLoc.set("qt", "/stream");
+
+      solrStream = new SolrStream(url, paramsLoc);
+
+      context = new StreamContext();
+      solrStream.setStreamContext(context);
+      tuples = getTuples(solrStream);
+      assertTrue(tuples.size() == 1);
+
+      //Test annotating tuple
+      expr = "select(search("+COLLECTIONORALIAS+", q=\"*:*\", fl=\"id, test_t\", sort=\"id desc\"), analyze(test_t, test_t) as test1_t)";
+      paramsLoc = new ModifiableSolrParams();
+      paramsLoc.set("expr", expr);
+      paramsLoc.set("qt", "/stream");
+
+      solrStream = new SolrStream(url, paramsLoc);
+
+      context = new StreamContext();
+      solrStream.setStreamContext(context);
+      tuples = getTuples(solrStream);
+      assertTrue(tuples.size() == 1);
+      List l = (List)tuples.get(0).get("test1_t");
+      assertTrue(l.get(0).equals("l"));
+      assertTrue(l.get(1).equals("b"));
+      assertTrue(l.get(2).equals("c"));
+      assertTrue(l.get(3).equals("d"));
+      assertTrue(l.get(4).equals("c"));
+    } finally {
+      cache.close();
+    }
+  }
+
+
+  @Test
   public void testExecutorStream() throws Exception {
     CollectionAdminRequest.createCollection("workQueue", "conf", 2, 1).process(cluster.getSolrClient());
     AbstractDistribZkTestBase.waitForRecoveriesToFinish("workQueue", cluster.getSolrClient().getZkStateReader(),


[10/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-10343: Update Solr default/example and test configs to use SynonymGraphFilterFactory

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/example/example-DIH/solr/solr/conf/managed-schema
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/solr/conf/managed-schema b/solr/example/example-DIH/solr/solr/conf/managed-schema
index 04c85c0..f6c5103 100644
--- a/solr/example/example-DIH/solr/solr/conf/managed-schema
+++ b/solr/example/example-DIH/solr/solr/conf/managed-schema
@@ -443,14 +443,15 @@
         <tokenizer class="solr.StandardTokenizerFactory"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
         <!-- in this example, we will only use synonyms at query time
-        <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.FlattenGraphFilterFactory"/>
         -->
         <filter class="solr.LowerCaseFilterFactory"/>
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.StandardTokenizerFactory"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.LowerCaseFilterFactory"/>
       </analyzer>
     </fieldType>
@@ -464,7 +465,8 @@
       <analyzer type="index">
         <tokenizer class="solr.StandardTokenizerFactory"/>
         <!-- in this example, we will only use synonyms at query time
-        <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.FlattenGraphFilterFactory"/>
         -->
         <!-- Case insensitive stop word removal.
         -->
@@ -482,7 +484,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.StopFilterFactory"
                 ignoreCase="true"
                 words="lang/stopwords_en.txt"
@@ -510,7 +512,7 @@
       <analyzer type="index">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
         <!-- in this example, we will only use synonyms at query time
-        <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
         -->
         <!-- Case insensitive stop word removal.
         -->
@@ -526,7 +528,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.StopFilterFactory"
                 ignoreCase="true"
                 words="lang/stopwords_en.txt"
@@ -543,7 +545,7 @@
     <fieldType name="text_en_splitting_tight" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
       <analyzer type="index">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
         <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
@@ -556,7 +558,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
         <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
@@ -580,7 +582,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
         <filter class="solr.LowerCaseFilterFactory"/>
       </analyzer>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/example/files/conf/managed-schema
----------------------------------------------------------------------
diff --git a/solr/example/files/conf/managed-schema b/solr/example/files/conf/managed-schema
index ff209be..3c47c35 100644
--- a/solr/example/files/conf/managed-schema
+++ b/solr/example/files/conf/managed-schema
@@ -133,7 +133,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.StandardTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" expand="true" ignoreCase="true" synonyms="synonyms.txt"/>
+      <filter class="solr.SynonymGraphFilterFactory" expand="true" ignoreCase="true" synonyms="synonyms.txt"/>
       <filter class="solr.StopFilterFactory" words="lang/stopwords_en.txt" ignoreCase="true"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.EnglishPossessiveFilterFactory"/>
@@ -153,7 +153,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" expand="true" ignoreCase="true" synonyms="synonyms.txt"/>
+      <filter class="solr.SynonymGraphFilterFactory" expand="true" ignoreCase="true" synonyms="synonyms.txt"/>
       <filter class="solr.StopFilterFactory" words="lang/stopwords_en.txt" ignoreCase="true"/>
       <filter class="solr.WordDelimiterGraphFilterFactory" catenateNumbers="0" generateNumberParts="1" splitOnCaseChange="1" generateWordParts="1" catenateAll="0" catenateWords="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
@@ -164,7 +164,7 @@
   <fieldType name="text_en_splitting_tight" class="solr.TextField" autoGeneratePhraseQueries="true" positionIncrementGap="100">
     <analyzer type="index">
       <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" expand="false" ignoreCase="true" synonyms="synonyms.txt"/>
+      <filter class="solr.SynonymGraphFilterFactory" expand="false" ignoreCase="true" synonyms="synonyms.txt"/>
       <filter class="solr.StopFilterFactory" words="lang/stopwords_en.txt" ignoreCase="true"/>
       <filter class="solr.WordDelimiterGraphFilterFactory" catenateNumbers="1" generateNumberParts="0" generateWordParts="0" catenateAll="0" catenateWords="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
@@ -175,7 +175,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" expand="false" ignoreCase="true" synonyms="synonyms.txt"/>
+      <filter class="solr.SynonymGraphFilterFactory" expand="false" ignoreCase="true" synonyms="synonyms.txt"/>
       <filter class="solr.StopFilterFactory" words="lang/stopwords_en.txt" ignoreCase="true"/>
       <filter class="solr.WordDelimiterGraphFilterFactory" catenateNumbers="1" generateNumberParts="0" generateWordParts="0" catenateAll="0" catenateWords="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
@@ -246,7 +246,7 @@
     <analyzer type="query">
       <tokenizer class="solr.StandardTokenizerFactory"/>
       <filter class="solr.StopFilterFactory" words="stopwords.txt" ignoreCase="true"/>
-      <filter class="solr.SynonymFilterFactory" expand="true" ignoreCase="true" synonyms="synonyms.txt"/>
+      <filter class="solr.SynonymGraphFilterFactory" expand="true" ignoreCase="true" synonyms="synonyms.txt"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
   </fieldType>
@@ -259,7 +259,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.StandardTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" expand="true" ignoreCase="true" synonyms="synonyms.txt"/>
+      <filter class="solr.SynonymGraphFilterFactory" expand="true" ignoreCase="true" synonyms="synonyms.txt"/>
       <filter class="solr.StopFilterFactory" words="stopwords.txt" ignoreCase="true"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/server/solr/configsets/basic_configs/conf/managed-schema
----------------------------------------------------------------------
diff --git a/solr/server/solr/configsets/basic_configs/conf/managed-schema b/solr/server/solr/configsets/basic_configs/conf/managed-schema
index 09aaae3..360a296 100644
--- a/solr/server/solr/configsets/basic_configs/conf/managed-schema
+++ b/solr/server/solr/configsets/basic_configs/conf/managed-schema
@@ -378,14 +378,15 @@
         <tokenizer class="solr.StandardTokenizerFactory"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
         <!-- in this example, we will only use synonyms at query time
-        <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.FlattenGraphFilterFactory"/>
         -->
         <filter class="solr.LowerCaseFilterFactory"/>
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.StandardTokenizerFactory"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.LowerCaseFilterFactory"/>
       </analyzer>
     </fieldType>
@@ -400,7 +401,8 @@
       <analyzer type="index">
         <tokenizer class="solr.StandardTokenizerFactory"/>
         <!-- in this example, we will only use synonyms at query time
-        <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.FlattenGraphFilterFactory"/>
         -->
         <!-- Case insensitive stop word removal.
         -->
@@ -418,7 +420,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.StopFilterFactory"
                 ignoreCase="true"
                 words="lang/stopwords_en.txt"
@@ -447,7 +449,7 @@
       <analyzer type="index">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
         <!-- in this example, we will only use synonyms at query time
-        <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
         -->
         <!-- Case insensitive stop word removal.
         -->
@@ -463,7 +465,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.StopFilterFactory"
                 ignoreCase="true"
                 words="lang/stopwords_en.txt"
@@ -481,7 +483,7 @@
     <fieldType name="text_en_splitting_tight" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
       <analyzer type="index">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
         <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
@@ -494,7 +496,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
         <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
@@ -520,7 +522,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
         <filter class="solr.LowerCaseFilterFactory"/>
       </analyzer>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/server/solr/configsets/data_driven_schema_configs/conf/managed-schema
----------------------------------------------------------------------
diff --git a/solr/server/solr/configsets/data_driven_schema_configs/conf/managed-schema b/solr/server/solr/configsets/data_driven_schema_configs/conf/managed-schema
index 0319eb0..2635a60 100644
--- a/solr/server/solr/configsets/data_driven_schema_configs/conf/managed-schema
+++ b/solr/server/solr/configsets/data_driven_schema_configs/conf/managed-schema
@@ -377,14 +377,15 @@
         <tokenizer class="solr.StandardTokenizerFactory"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
         <!-- in this example, we will only use synonyms at query time
-        <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.FlattenGraphFilterFactory"/>
         -->
         <filter class="solr.LowerCaseFilterFactory"/>
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.StandardTokenizerFactory"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.LowerCaseFilterFactory"/>
       </analyzer>
     </fieldType>
@@ -399,7 +400,8 @@
       <analyzer type="index">
         <tokenizer class="solr.StandardTokenizerFactory"/>
         <!-- in this example, we will only use synonyms at query time
-        <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.FlattenGraphFilterFactory"/>
         -->
         <!-- Case insensitive stop word removal.
         -->
@@ -417,7 +419,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.StopFilterFactory"
                 ignoreCase="true"
                 words="lang/stopwords_en.txt"
@@ -446,7 +448,7 @@
       <analyzer type="index">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
         <!-- in this example, we will only use synonyms at query time
-        <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
         -->
         <!-- Case insensitive stop word removal.
         -->
@@ -462,7 +464,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.StopFilterFactory"
                 ignoreCase="true"
                 words="lang/stopwords_en.txt"
@@ -480,7 +482,7 @@
     <fieldType name="text_en_splitting_tight" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
       <analyzer type="index">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
         <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
@@ -493,7 +495,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
         <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
@@ -519,7 +521,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
         <filter class="solr.LowerCaseFilterFactory"/>
       </analyzer>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/server/solr/configsets/sample_techproducts_configs/conf/managed-schema
----------------------------------------------------------------------
diff --git a/solr/server/solr/configsets/sample_techproducts_configs/conf/managed-schema b/solr/server/solr/configsets/sample_techproducts_configs/conf/managed-schema
index 17dadd4..2756516 100644
--- a/solr/server/solr/configsets/sample_techproducts_configs/conf/managed-schema
+++ b/solr/server/solr/configsets/sample_techproducts_configs/conf/managed-schema
@@ -487,14 +487,15 @@
         <tokenizer class="solr.StandardTokenizerFactory"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
         <!-- in this example, we will only use synonyms at query time
-        <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.FlattenGraphFilterFactory"/>
         -->
         <filter class="solr.LowerCaseFilterFactory"/>
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.StandardTokenizerFactory"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.LowerCaseFilterFactory"/>
       </analyzer>
     </fieldType>
@@ -508,7 +509,8 @@
       <analyzer type="index">
         <tokenizer class="solr.StandardTokenizerFactory"/>
         <!-- in this example, we will only use synonyms at query time
-        <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.FlattenGraphFilterFactory"/>
         -->
         <!-- Case insensitive stop word removal.
         -->
@@ -526,7 +528,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.StopFilterFactory"
                 ignoreCase="true"
                 words="lang/stopwords_en.txt"
@@ -554,7 +556,7 @@
       <analyzer type="index">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
         <!-- in this example, we will only use synonyms at query time
-        <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
         -->
         <!-- Case insensitive stop word removal.
         -->
@@ -570,7 +572,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.StopFilterFactory"
                 ignoreCase="true"
                 words="lang/stopwords_en.txt"
@@ -587,7 +589,7 @@
     <fieldType name="text_en_splitting_tight" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
       <analyzer type="index">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
         <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
@@ -600,7 +602,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
         <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
@@ -624,7 +626,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
         <filter class="solr.LowerCaseFilterFactory"/>
       </analyzer>


[46/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-10393: Adds UUID Streaming Evaluator

Posted by ab...@apache.org.
SOLR-10393: Adds UUID Streaming Evaluator


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/ef821834
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/ef821834
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/ef821834

Branch: refs/heads/jira/solr-9959
Commit: ef821834d15194c2c8b626d494b5119dd42b4f9f
Parents: 674ce4e
Author: Dennis Gove <dp...@gmail.com>
Authored: Fri Mar 31 20:52:42 2017 -0400
Committer: Dennis Gove <dp...@gmail.com>
Committed: Sat Apr 1 23:15:43 2017 -0400

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  2 +
 .../org/apache/solr/handler/StreamHandler.java  |  2 +
 .../client/solrj/io/eval/UuidEvaluator.java     | 57 ++++++++++++++++++++
 .../solrj/io/stream/eval/UuidEvaluatorTest.java | 52 ++++++++++++++++++
 4 files changed, 113 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ef821834/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 1c3aaf7..e30824f 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -63,6 +63,8 @@ New Features
 
 * SOLR-10356: Adds basic math Streaming Evaluators (Dennis Gove)
 
+* SOLR-10393: Adds UUID Streaming Evaluator (Dennis Gove)
+
 Bug Fixes
 ----------------------
 * SOLR-9262: Connection and read timeouts are being ignored by UpdateShardHandler after SOLR-4509.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ef821834/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/StreamHandler.java b/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
index b508754..599924e 100644
--- a/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/StreamHandler.java
@@ -67,6 +67,7 @@ import org.apache.solr.client.solrj.io.eval.SineEvaluator;
 import org.apache.solr.client.solrj.io.eval.SquareRootEvaluator;
 import org.apache.solr.client.solrj.io.eval.SubtractEvaluator;
 import org.apache.solr.client.solrj.io.eval.TangentEvaluator;
+import org.apache.solr.client.solrj.io.eval.UuidEvaluator;
 import org.apache.solr.client.solrj.io.graph.GatherNodesStream;
 import org.apache.solr.client.solrj.io.graph.ShortestPathStream;
 import org.apache.solr.client.solrj.io.ops.ConcatOperation;
@@ -277,6 +278,7 @@ public class StreamHandler extends RequestHandlerBase implements SolrCoreAware,
       .withFunctionName("sqrt", SquareRootEvaluator.class)
       .withFunctionName("cbrt", CubedRootEvaluator.class)
       .withFunctionName("coalesce", CoalesceEvaluator.class)
+      .withFunctionName("uuid", UuidEvaluator.class)
       
       // Conditional Stream Evaluators
       .withFunctionName("if", IfThenElseEvaluator.class)

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ef821834/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/UuidEvaluator.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/UuidEvaluator.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/UuidEvaluator.java
new file mode 100644
index 0000000..88acee4
--- /dev/null
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/eval/UuidEvaluator.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ *
+ */
+package org.apache.solr.client.solrj.io.eval;
+
+import java.io.IOException;
+import java.util.UUID;
+
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.stream.expr.Explanation;
+import org.apache.solr.client.solrj.io.stream.expr.Explanation.ExpressionType;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
+import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionParameter;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+
+public class UuidEvaluator extends SimpleEvaluator {
+  protected static final long serialVersionUID = 1L;
+
+  public UuidEvaluator(StreamExpression expression, StreamFactory factory) throws IOException{
+    // no parameters are used
+  }
+
+  @Override
+  public UUID evaluate(Tuple tuple) throws IOException {
+    return UUID.randomUUID();
+  }
+
+  @Override
+  public StreamExpressionParameter toExpression(StreamFactory factory) throws IOException {
+    return new StreamExpression(factory.getFunctionName(getClass()));
+  }
+
+  @Override
+  public Explanation toExplanation(StreamFactory factory) throws IOException {
+    return new Explanation(nodeId.toString())
+      .withExpressionType(ExpressionType.EVALUATOR)
+      .withImplementingClass(getClass().getName())
+      .withExpression(toExpression(factory).toString());
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/ef821834/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/UuidEvaluatorTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/UuidEvaluatorTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/UuidEvaluatorTest.java
new file mode 100644
index 0000000..682b600
--- /dev/null
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/eval/UuidEvaluatorTest.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.client.solrj.io.stream.eval;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.UUID;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.solr.client.solrj.io.Tuple;
+import org.apache.solr.client.solrj.io.eval.StreamEvaluator;
+import org.apache.solr.client.solrj.io.eval.UuidEvaluator;
+import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
+import org.junit.Test;
+
+import junit.framework.Assert;
+
+public class UuidEvaluatorTest extends LuceneTestCase {
+
+  StreamFactory factory;
+  Map<String, Object> values;
+  
+  public UuidEvaluatorTest() {
+    super();
+    
+    factory = new StreamFactory()
+      .withFunctionName("uuid", UuidEvaluator.class);
+    values = new HashMap<String,Object>();
+  }
+    
+  @Test
+  public void isUuidType() throws Exception{
+    StreamEvaluator evaluator = factory.constructEvaluator("uuid()");
+    
+    Assert.assertTrue(evaluator.evaluate(null) instanceof UUID);
+    Assert.assertTrue(evaluator.evaluate(new Tuple(values)) instanceof UUID);
+  }    
+}


[51/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-9959 Fix precommit issues.

Posted by ab...@apache.org.
SOLR-9959 Fix precommit issues.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/14be5a9e
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/14be5a9e
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/14be5a9e

Branch: refs/heads/jira/solr-9959
Commit: 14be5a9e3b34e82408a01ec7afe65ae234f6ac86
Parents: 7ca861f
Author: Andrzej Bialecki <ab...@apache.org>
Authored: Mon Apr 3 19:12:04 2017 +0200
Committer: Andrzej Bialecki <ab...@apache.org>
Committed: Mon Apr 3 19:12:04 2017 +0200

----------------------------------------------------------------------
 .../java/org/apache/solr/core/SolrConfig.java   | 37 +++-----------------
 .../src/java/org/apache/solr/core/SolrCore.java |  1 -
 .../apache/solr/handler/RequestHandlerBase.java |  3 --
 .../solr/handler/component/SearchComponent.java |  1 -
 .../solr/highlight/HighlightingPluginBase.java  |  3 --
 .../solr/metrics/reporters/SolrJmxReporter.java |  9 ++---
 .../org/apache/solr/update/UpdateHandler.java   |  2 --
 .../apache/solr/update/UpdateShardHandler.java  |  2 --
 8 files changed, 7 insertions(+), 51 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/14be5a9e/solr/core/src/java/org/apache/solr/core/SolrConfig.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/SolrConfig.java b/solr/core/src/java/org/apache/solr/core/SolrConfig.java
index ae7f43c..4e7ab48 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrConfig.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrConfig.java
@@ -276,6 +276,10 @@ public class SolrConfig extends Config implements MapSerializable {
     hashSetInverseLoadFactor = 1.0f / getFloat("//HashDocSet/@loadFactor", 0.75f);
     hashDocSetMaxSize = getInt("//HashDocSet/@maxSize", 3000);
 
+    if (get("jmx", null) != null) {
+      log.warn("solrconfig.xml: <jmx> is no longer supported, use solr.xml:/metrics/reporter section instead");
+    }
+
     httpCachingConfig = new HttpCachingConfig(this);
 
     maxWarmingSearchers = getInt("query/maxWarmingSearchers", 1);
@@ -506,39 +510,6 @@ public class SolrConfig extends Config implements MapSerializable {
     return httpCachingConfig;
   }
 
-  public static class JmxConfiguration implements MapSerializable {
-    public boolean enabled = false;
-    public String agentId;
-    public String serviceUrl;
-    public String rootName;
-
-    public JmxConfiguration(boolean enabled,
-                            String agentId,
-                            String serviceUrl,
-                            String rootName) {
-      this.enabled = enabled;
-      this.agentId = agentId;
-      this.serviceUrl = serviceUrl;
-      this.rootName = rootName;
-
-      if (agentId != null && serviceUrl != null) {
-        throw new SolrException
-            (SolrException.ErrorCode.SERVER_ERROR,
-                "Incorrect JMX Configuration in solrconfig.xml, " +
-                    "both agentId and serviceUrl cannot be specified at the same time");
-      }
-
-    }
-
-    @Override
-    public Map<String, Object> toMap(Map<String, Object> map) {
-      map.put("agentId", agentId);
-      map.put("serviceUrl", serviceUrl);
-      map.put("rootName", rootName);
-      return map;
-    }
-  }
-
   public static class HttpCachingConfig implements MapSerializable {
 
     /**

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/14be5a9e/solr/core/src/java/org/apache/solr/core/SolrCore.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/SolrCore.java b/solr/core/src/java/org/apache/solr/core/SolrCore.java
index dfef75a..4ed7006 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrCore.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrCore.java
@@ -160,7 +160,6 @@ import org.apache.solr.util.RefCounted;
 import org.apache.solr.util.plugin.NamedListInitializedPlugin;
 import org.apache.solr.util.plugin.PluginInfoInitialized;
 import org.apache.solr.util.plugin.SolrCoreAware;
-import org.apache.solr.util.stats.MetricUtils;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.data.Stat;
 import org.slf4j.Logger;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/14be5a9e/solr/core/src/java/org/apache/solr/handler/RequestHandlerBase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/RequestHandlerBase.java b/solr/core/src/java/org/apache/solr/handler/RequestHandlerBase.java
index 4d0f8ab..4219768 100644
--- a/solr/core/src/java/org/apache/solr/handler/RequestHandlerBase.java
+++ b/solr/core/src/java/org/apache/solr/handler/RequestHandlerBase.java
@@ -18,9 +18,7 @@ package org.apache.solr.handler;
 
 import java.lang.invoke.MethodHandles;
 import java.util.Collection;
-import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Map;
 import java.util.Set;
 
 import com.codahale.metrics.MetricRegistry;
@@ -45,7 +43,6 @@ import org.apache.solr.util.SolrPluginUtils;
 import org.apache.solr.api.Api;
 import org.apache.solr.api.ApiBag;
 import org.apache.solr.api.ApiSupport;
-import org.apache.solr.util.stats.MetricUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/14be5a9e/solr/core/src/java/org/apache/solr/handler/component/SearchComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/SearchComponent.java b/solr/core/src/java/org/apache/solr/handler/component/SearchComponent.java
index dc82993..c615c5a 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/SearchComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/SearchComponent.java
@@ -28,7 +28,6 @@ import org.apache.solr.common.util.NamedList;
 import org.apache.solr.core.SolrInfoBean;
 import org.apache.solr.search.facet.FacetModule;
 import org.apache.solr.util.plugin.NamedListInitializedPlugin;
-import org.apache.solr.util.stats.MetricUtils;
 
 /**
  * TODO!

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/14be5a9e/solr/core/src/java/org/apache/solr/highlight/HighlightingPluginBase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/highlight/HighlightingPluginBase.java b/solr/core/src/java/org/apache/solr/highlight/HighlightingPluginBase.java
index de9c14c..7acaacd 100644
--- a/solr/core/src/java/org/apache/solr/highlight/HighlightingPluginBase.java
+++ b/solr/core/src/java/org/apache/solr/highlight/HighlightingPluginBase.java
@@ -16,9 +16,7 @@
  */
 package org.apache.solr.highlight;
 
-import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Map;
 import java.util.Set;
 
 import com.codahale.metrics.Counter;
@@ -28,7 +26,6 @@ import org.apache.solr.common.util.NamedList;
 import org.apache.solr.core.SolrInfoBean;
 import org.apache.solr.metrics.SolrMetricManager;
 import org.apache.solr.metrics.SolrMetricProducer;
-import org.apache.solr.util.stats.MetricUtils;
 
 /**
  * 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/14be5a9e/solr/core/src/java/org/apache/solr/metrics/reporters/SolrJmxReporter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/metrics/reporters/SolrJmxReporter.java b/solr/core/src/java/org/apache/solr/metrics/reporters/SolrJmxReporter.java
index e123e44..e93acb9 100644
--- a/solr/core/src/java/org/apache/solr/metrics/reporters/SolrJmxReporter.java
+++ b/solr/core/src/java/org/apache/solr/metrics/reporters/SolrJmxReporter.java
@@ -16,11 +16,7 @@
  */
 package org.apache.solr.metrics.reporters;
 
-import javax.management.InstanceAlreadyExistsException;
-import javax.management.InstanceNotFoundException;
-import javax.management.MBeanRegistrationException;
 import javax.management.MBeanServer;
-import javax.management.NotCompliantMBeanException;
 import javax.management.ObjectName;
 
 import java.io.IOException;
@@ -106,7 +102,7 @@ public class SolrJmxReporter extends SolrMetricReporter {
 
     JmxObjectNameFactory jmxObjectNameFactory = new JmxObjectNameFactory(pluginInfo.name, domain);
     registry = metricManager.registry(registryName);
-    // filter out MetricsMap gauges
+    // filter out MetricsMap gauges - we have a better way of handling them
     MetricFilter filter = (name, metric) -> !(metric instanceof MetricsMap);
 
     reporter = JmxReporter.forRegistry(registry)
@@ -116,7 +112,7 @@ public class SolrJmxReporter extends SolrMetricReporter {
                           .createsObjectNamesWith(jmxObjectNameFactory)
                           .build();
     reporter.start();
-    // work around for inability to register custom MBeans
+    // workaround for inability to register custom MBeans (to be available in metrics 4.0?)
     listener = new MetricsMapListener(mBeanServer, jmxObjectNameFactory);
     registry.addListener(listener);
 
@@ -222,6 +218,7 @@ public class SolrJmxReporter extends SolrMetricReporter {
   private static class MetricsMapListener extends MetricRegistryListener.Base {
     MBeanServer server;
     JmxObjectNameFactory nameFactory;
+    // keep the names so that we can unregister them on core close
     List<ObjectName> registered = new ArrayList<>();
 
     MetricsMapListener(MBeanServer server, JmxObjectNameFactory nameFactory) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/14be5a9e/solr/core/src/java/org/apache/solr/update/UpdateHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/UpdateHandler.java b/solr/core/src/java/org/apache/solr/update/UpdateHandler.java
index 0ef5484..49d2664 100644
--- a/solr/core/src/java/org/apache/solr/update/UpdateHandler.java
+++ b/solr/core/src/java/org/apache/solr/update/UpdateHandler.java
@@ -20,7 +20,6 @@ package org.apache.solr.update;
 import java.io.IOException;
 import java.lang.invoke.MethodHandles;
 import java.util.HashSet;
-import java.util.Map;
 import java.util.Set;
 import java.util.Vector;
 
@@ -34,7 +33,6 @@ import org.apache.solr.core.SolrInfoBean;
 import org.apache.solr.schema.FieldType;
 import org.apache.solr.schema.SchemaField;
 import org.apache.solr.util.plugin.SolrCoreAware;
-import org.apache.solr.util.stats.MetricUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/14be5a9e/solr/core/src/java/org/apache/solr/update/UpdateShardHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/UpdateShardHandler.java b/solr/core/src/java/org/apache/solr/update/UpdateShardHandler.java
index d244a85..ca8cea5 100644
--- a/solr/core/src/java/org/apache/solr/update/UpdateShardHandler.java
+++ b/solr/core/src/java/org/apache/solr/update/UpdateShardHandler.java
@@ -18,11 +18,9 @@ package org.apache.solr.update;
 
 import java.lang.invoke.MethodHandles;
 import java.util.HashSet;
-import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ExecutorService;
 
-import com.codahale.metrics.InstrumentedExecutorService;
 import com.codahale.metrics.MetricRegistry;
 import org.apache.http.client.HttpClient;
 import org.apache.http.impl.client.CloseableHttpClient;


[07/52] [abbrv] lucene-solr:jira/solr-9959: LUCENE-7751: Avoid boxing primitives only to call compareTo.

Posted by ab...@apache.org.
LUCENE-7751: Avoid boxing primitives only to call compareTo.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/c189139e
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/c189139e
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/c189139e

Branch: refs/heads/jira/solr-9959
Commit: c189139e5222d2a8b6fba6bfc5c1194d68b46a77
Parents: 53064e4
Author: Adrien Grand <jp...@gmail.com>
Authored: Tue Mar 28 15:21:35 2017 +0200
Committer: Adrien Grand <jp...@gmail.com>
Committed: Tue Mar 28 15:35:04 2017 +0200

----------------------------------------------------------------------
 lucene/CHANGES.txt                                              | 3 +++
 lucene/core/src/java/org/apache/lucene/util/VirtualMethod.java  | 2 +-
 .../src/java/org/apache/solr/spelling/SpellCheckCollation.java  | 2 +-
 .../solr/handler/component/DistributedFacetPivotSmallTest.java  | 5 ++---
 solr/solrj/src/java/org/apache/solr/common/cloud/DocRouter.java | 4 ++--
 5 files changed, 9 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c189139e/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index e6da586..a8f24c7 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -87,6 +87,9 @@ Other
 * LUCENE-7754: Inner classes should be static whenever possible.
   (Daniel Jelinski via Adrien Grand)
 
+* LUCENE-7751: Avoid boxing primitives only to call compareTo.
+  (Daniel Jelinski via Adrien Grand)
+
 ======================= Lucene 6.5.0 =======================
 
 API Changes

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c189139e/lucene/core/src/java/org/apache/lucene/util/VirtualMethod.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/util/VirtualMethod.java b/lucene/core/src/java/org/apache/lucene/util/VirtualMethod.java
index f21c16b..ea56862 100644
--- a/lucene/core/src/java/org/apache/lucene/util/VirtualMethod.java
+++ b/lucene/core/src/java/org/apache/lucene/util/VirtualMethod.java
@@ -143,7 +143,7 @@ public final class VirtualMethod<C> {
   public static <C> int compareImplementationDistance(final Class<? extends C> clazz,
     final VirtualMethod<C> m1, final VirtualMethod<C> m2)
   {
-    return Integer.valueOf(m1.getImplementationDistance(clazz)).compareTo(m2.getImplementationDistance(clazz));
+    return Integer.compare(m1.getImplementationDistance(clazz), m2.getImplementationDistance(clazz));
   }
   
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c189139e/solr/core/src/java/org/apache/solr/spelling/SpellCheckCollation.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/spelling/SpellCheckCollation.java b/solr/core/src/java/org/apache/solr/spelling/SpellCheckCollation.java
index bef3ec6..7f3bd95 100644
--- a/solr/core/src/java/org/apache/solr/spelling/SpellCheckCollation.java
+++ b/solr/core/src/java/org/apache/solr/spelling/SpellCheckCollation.java
@@ -26,7 +26,7 @@ public class SpellCheckCollation implements Comparable<SpellCheckCollation> {
 
   @Override
   public int compareTo(SpellCheckCollation scc) {
-    int c = new Integer(internalRank).compareTo(scc.internalRank);
+    int c = Integer.compare(internalRank, scc.internalRank);
     if (c == 0) {
       return collationQuery.compareTo(scc.collationQuery);
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c189139e/solr/core/src/test/org/apache/solr/handler/component/DistributedFacetPivotSmallTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/handler/component/DistributedFacetPivotSmallTest.java b/solr/core/src/test/org/apache/solr/handler/component/DistributedFacetPivotSmallTest.java
index fc7af80..d293e69 100644
--- a/solr/core/src/test/org/apache/solr/handler/component/DistributedFacetPivotSmallTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/component/DistributedFacetPivotSmallTest.java
@@ -1636,8 +1636,7 @@ public class DistributedFacetPivotSmallTest extends BaseDistributedSearchTestCas
     
     @Override
     public int compare(PivotField o1, PivotField o2) {
-      Integer compare = (Integer.valueOf(o2.getCount())).compareTo(Integer
-          .valueOf(o1.getCount()));
+      int compare = Integer.compare(o2.getCount(), o1.getCount());
       if (compare == 0) {
         compare = ((String) o2.getValue()).compareTo((String) o1.getValue());
       }
@@ -1650,7 +1649,7 @@ public class DistributedFacetPivotSmallTest extends BaseDistributedSearchTestCas
           }
         }
         if (compare == 0) {
-          compare = Integer.valueOf(o1.getFacetQuery().size()).compareTo(
+          compare = Integer.compare(o1.getFacetQuery().size(),
               o2.getFacetQuery().size());
         }
       }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/c189139e/solr/solrj/src/java/org/apache/solr/common/cloud/DocRouter.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/DocRouter.java b/solr/solrj/src/java/org/apache/solr/common/cloud/DocRouter.java
index 6fffb3a..846c25e 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/DocRouter.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/DocRouter.java
@@ -134,8 +134,8 @@ public abstract class DocRouter {
 
     @Override
     public int compareTo(Range that) {
-      int mincomp = Integer.valueOf(this.min).compareTo(that.min);
-      return mincomp == 0 ? Integer.valueOf(this.max).compareTo(that.max) : mincomp;
+      int mincomp = Integer.compare(this.min, that.min);
+      return mincomp == 0 ? Integer.compare(this.max, that.max) : mincomp;
     }
   }
 


[21/52] [abbrv] lucene-solr:jira/solr-9959: remove dead code

Posted by ab...@apache.org.
remove dead code


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/1ace1740
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/1ace1740
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/1ace1740

Branch: refs/heads/jira/solr-9959
Commit: 1ace1740d96859009dacc41bce1941f1fb31e497
Parents: a6f27d3
Author: Mike McCandless <mi...@apache.org>
Authored: Wed Mar 29 08:48:26 2017 -0400
Committer: Mike McCandless <mi...@apache.org>
Committed: Wed Mar 29 08:48:26 2017 -0400

----------------------------------------------------------------------
 lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java | 1 -
 1 file changed, 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1ace1740/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java b/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java
index 5ab16c2..c5d9ddc 100644
--- a/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java
+++ b/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java
@@ -414,7 +414,6 @@ public class IndexSearcher {
       throw new IllegalArgumentException("after.doc exceeds the number of documents in the reader: after.doc="
           + after.doc + " limit=" + limit);
     }
-    numHits = Math.min(numHits, limit);
 
     final int cappedNumHits = Math.min(numHits, limit);
 


[23/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-10352: Fixing available entropy warning limit to 300

Posted by ab...@apache.org.
SOLR-10352: Fixing available entropy warning limit to 300


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/efdb04d0
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/efdb04d0
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/efdb04d0

Branch: refs/heads/jira/solr-9959
Commit: efdb04d06c9d37b543ab0469c65f3474c34d455a
Parents: 30f7914
Author: Ishan Chattopadhyaya <is...@apache.org>
Authored: Wed Mar 29 19:22:02 2017 +0530
Committer: Ishan Chattopadhyaya <is...@apache.org>
Committed: Wed Mar 29 19:22:02 2017 +0530

----------------------------------------------------------------------
 solr/bin/solr | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/efdb04d0/solr/bin/solr
----------------------------------------------------------------------
diff --git a/solr/bin/solr b/solr/bin/solr
index 27ec054..903309b 100755
--- a/solr/bin/solr
+++ b/solr/bin/solr
@@ -1771,7 +1771,7 @@ function launch_solr() {
 	1>"$SOLR_LOGS_DIR/solr-$SOLR_PORT-console.log" 2>&1 & echo $! > "$SOLR_PID_DIR/solr-$SOLR_PORT.pid"
 
     # check if /proc/sys/kernel/random/entropy_avail exists then check output of cat /proc/sys/kernel/random/entropy_avail to see if less than 300
-    if [[ -f /proc/sys/kernel/random/entropy_avail ]] && (( `cat /proc/sys/kernel/random/entropy_avail` < 30000)); then
+    if [[ -f /proc/sys/kernel/random/entropy_avail ]] && (( `cat /proc/sys/kernel/random/entropy_avail` < 300)); then
 	echo "Warning: Available entropy is low. As a result, use of the UUIDField, SSL, or any other features that require"
 	echo "RNG might not work properly. To check for the amount of available entropy, use 'cat /proc/sys/kernel/random/entropy_avail'."
 	echo ""


[36/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-10399: cleanup unused imports

Posted by ab...@apache.org.
SOLR-10399: cleanup unused imports


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/35aac1d4
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/35aac1d4
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/35aac1d4

Branch: refs/heads/jira/solr-9959
Commit: 35aac1d4623a34fe6b03d934eefd9066c61a95d5
Parents: b60b86e
Author: Chris Hostetter <ho...@apache.org>
Authored: Fri Mar 31 18:16:13 2017 -0700
Committer: Chris Hostetter <ho...@apache.org>
Committed: Fri Mar 31 18:16:13 2017 -0700

----------------------------------------------------------------------
 solr/core/src/java/org/apache/solr/request/SimpleFacets.java | 2 --
 1 file changed, 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/35aac1d4/solr/core/src/java/org/apache/solr/request/SimpleFacets.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/request/SimpleFacets.java b/solr/core/src/java/org/apache/solr/request/SimpleFacets.java
index 8972121..0bfef4c 100644
--- a/solr/core/src/java/org/apache/solr/request/SimpleFacets.java
+++ b/solr/core/src/java/org/apache/solr/request/SimpleFacets.java
@@ -42,7 +42,6 @@ import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.MultiPostingsEnum;
 import org.apache.lucene.index.PostingsEnum;
-import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.search.BooleanClause.Occur;
@@ -52,7 +51,6 @@ import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.FilterCollector;
 import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.Query;
-import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.grouping.AllGroupHeadsCollector;
 import org.apache.lucene.search.grouping.term.TermAllGroupsCollector;
 import org.apache.lucene.search.grouping.term.TermGroupFacetCollector;


[41/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-7383: Replace DIH 'rss' example with 'atom' rss example was broken for multiple reasons. atom example showcases the same - and more - features and uses the smallest config file needed to make it work.

Posted by ab...@apache.org.
SOLR-7383: Replace DIH 'rss' example with 'atom'
rss example was broken for multiple reasons.
atom example showcases the same - and more - features
and uses the smallest config file needed to make it work.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/580f6e98
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/580f6e98
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/580f6e98

Branch: refs/heads/jira/solr-9959
Commit: 580f6e98fb033dbbb8e0921fc3175021714ce956
Parents: 35aac1d
Author: Alexandre Rafalovitch <ar...@apache.org>
Authored: Sat Apr 1 13:42:23 2017 -0400
Committer: Alexandre Rafalovitch <ar...@apache.org>
Committed: Sat Apr 1 13:42:23 2017 -0400

----------------------------------------------------------------------
 solr/CHANGES.txt                                |    2 +
 solr/README.txt                                 |    2 +-
 solr/example/README.txt                         |    2 +-
 solr/example/example-DIH/README.txt             |   11 +-
 .../solr/atom/conf/atom-data-config.xml         |   35 +
 .../solr/atom/conf/lang/stopwords_en.txt        |   54 +
 .../example-DIH/solr/atom/conf/managed-schema   |  106 +
 .../example-DIH/solr/atom/conf/protwords.txt    |   17 +
 .../example-DIH/solr/atom/conf/solrconfig.xml   |   61 +
 .../example-DIH/solr/atom/conf/synonyms.txt     |   29 +
 .../example-DIH/solr/atom/conf/url_types.txt    |    1 +
 .../example-DIH/solr/atom/core.properties       |    0
 .../example-DIH/solr/rss/conf/admin-extra.html  |   24 -
 .../solr/rss/conf/admin-extra.menu-bottom.html  |   25 -
 .../solr/rss/conf/admin-extra.menu-top.html     |   25 -
 .../clustering/carrot2/kmeans-attributes.xml    |   19 -
 .../clustering/carrot2/lingo-attributes.xml     |   24 -
 .../conf/clustering/carrot2/stc-attributes.xml  |   19 -
 .../example-DIH/solr/rss/conf/currency.xml      |   67 -
 .../example-DIH/solr/rss/conf/elevate.xml       |   42 -
 .../solr/rss/conf/lang/contractions_ca.txt      |    8 -
 .../solr/rss/conf/lang/contractions_fr.txt      |   15 -
 .../solr/rss/conf/lang/contractions_ga.txt      |    5 -
 .../solr/rss/conf/lang/contractions_it.txt      |   23 -
 .../solr/rss/conf/lang/hyphenations_ga.txt      |    5 -
 .../solr/rss/conf/lang/stemdict_nl.txt          |    6 -
 .../solr/rss/conf/lang/stoptags_ja.txt          |  420 --
 .../solr/rss/conf/lang/stopwords_ar.txt         |  125 -
 .../solr/rss/conf/lang/stopwords_bg.txt         |  193 -
 .../solr/rss/conf/lang/stopwords_ca.txt         |  220 -
 .../solr/rss/conf/lang/stopwords_ckb.txt        |  136 -
 .../solr/rss/conf/lang/stopwords_cz.txt         |  172 -
 .../solr/rss/conf/lang/stopwords_da.txt         |  110 -
 .../solr/rss/conf/lang/stopwords_de.txt         |  294 --
 .../solr/rss/conf/lang/stopwords_el.txt         |   78 -
 .../solr/rss/conf/lang/stopwords_en.txt         |   54 -
 .../solr/rss/conf/lang/stopwords_es.txt         |  356 --
 .../solr/rss/conf/lang/stopwords_eu.txt         |   99 -
 .../solr/rss/conf/lang/stopwords_fa.txt         |  313 --
 .../solr/rss/conf/lang/stopwords_fi.txt         |   97 -
 .../solr/rss/conf/lang/stopwords_fr.txt         |  186 -
 .../solr/rss/conf/lang/stopwords_ga.txt         |  110 -
 .../solr/rss/conf/lang/stopwords_gl.txt         |  161 -
 .../solr/rss/conf/lang/stopwords_hi.txt         |  235 --
 .../solr/rss/conf/lang/stopwords_hu.txt         |  211 -
 .../solr/rss/conf/lang/stopwords_hy.txt         |   46 -
 .../solr/rss/conf/lang/stopwords_id.txt         |  359 --
 .../solr/rss/conf/lang/stopwords_it.txt         |  303 --
 .../solr/rss/conf/lang/stopwords_ja.txt         |  127 -
 .../solr/rss/conf/lang/stopwords_lv.txt         |  172 -
 .../solr/rss/conf/lang/stopwords_nl.txt         |  119 -
 .../solr/rss/conf/lang/stopwords_no.txt         |  194 -
 .../solr/rss/conf/lang/stopwords_pt.txt         |  253 --
 .../solr/rss/conf/lang/stopwords_ro.txt         |  233 --
 .../solr/rss/conf/lang/stopwords_ru.txt         |  243 --
 .../solr/rss/conf/lang/stopwords_sv.txt         |  133 -
 .../solr/rss/conf/lang/stopwords_th.txt         |  119 -
 .../solr/rss/conf/lang/stopwords_tr.txt         |  212 -
 .../solr/rss/conf/lang/userdict_ja.txt          |   29 -
 .../example-DIH/solr/rss/conf/managed-schema    | 1096 -----
 .../solr/rss/conf/mapping-FoldToASCII.txt       | 3813 ------------------
 .../solr/rss/conf/mapping-ISOLatin1Accent.txt   |  246 --
 .../example-DIH/solr/rss/conf/protwords.txt     |   21 -
 .../solr/rss/conf/rss-data-config.xml           |   26 -
 .../example-DIH/solr/rss/conf/solrconfig.xml    | 1396 -------
 .../example-DIH/solr/rss/conf/spellings.txt     |    2 -
 .../example-DIH/solr/rss/conf/stopwords.txt     |   14 -
 .../example-DIH/solr/rss/conf/synonyms.txt      |   29 -
 .../example-DIH/solr/rss/conf/update-script.js  |   53 -
 .../example-DIH/solr/rss/conf/xslt/example.xsl  |  132 -
 .../solr/rss/conf/xslt/example_atom.xsl         |   67 -
 .../solr/rss/conf/xslt/example_rss.xsl          |   66 -
 .../example-DIH/solr/rss/conf/xslt/luke.xsl     |  337 --
 .../solr/rss/conf/xslt/updateXml.xsl            |   70 -
 .../example-DIH/solr/rss/core.properties        |    0
 75 files changed, 312 insertions(+), 13795 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 1efefd0..3187dc3 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -180,6 +180,8 @@ Other Changes
 
 * SOLR-10399: Generalize some internal facet logic to simplify points/non-points field handling (Adrien Grand, hossman)
 
+* SOLR-7383: New DataImportHandler 'atom' example, replacing broken 'rss' example (Alexandre Rafalovitch)
+
 ==================  6.5.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/README.txt
----------------------------------------------------------------------
diff --git a/solr/README.txt b/solr/README.txt
index 4ef5eac..6af0cc6 100644
--- a/solr/README.txt
+++ b/solr/README.txt
@@ -67,7 +67,7 @@ Solr includes a few examples to help you get started. To run a specific example,
   bin/solr -e <EXAMPLE> where <EXAMPLE> is one of:
 
     cloud        : SolrCloud example
-    dih          : Data Import Handler (rdbms, mail, rss, tika)
+    dih          : Data Import Handler (rdbms, mail, atom, tika)
     schemaless   : Schema-less example (schema is inferred from data during indexing)
     techproducts : Kitchen sink example providing comprehensive examples of Solr features
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/README.txt
----------------------------------------------------------------------
diff --git a/solr/example/README.txt b/solr/example/README.txt
index d8402eb..4c8cca1 100644
--- a/solr/example/README.txt
+++ b/solr/example/README.txt
@@ -22,7 +22,7 @@ separate directory. To run a specific example, do:
   bin/solr -e <EXAMPLE> where <EXAMPLE> is one of:
   
     cloud        : SolrCloud example
-    dih          : Data Import Handler (rdbms, mail, rss, tika)
+    dih          : Data Import Handler (rdbms, mail, atom, tika)
     schemaless   : Schema-less example (schema is inferred from data during indexing)
     techproducts : Kitchen sink example providing comprehensive examples of Solr features
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/README.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/README.txt b/solr/example/example-DIH/README.txt
index 0926bb6..ea8d28f 100644
--- a/solr/example/example-DIH/README.txt
+++ b/solr/example/example-DIH/README.txt
@@ -16,7 +16,7 @@
 Solr DataImportHandler example configuration
 --------------------------------------------
 
-To run this example, use the "-e" option of the bin/solr script:
+To run this multi-core example, use the "-e" option of the bin/solr script:
 
 > bin/solr -e dih
 
@@ -28,9 +28,9 @@ When Solr is started connect to:
 
   http://localhost:8983/solr/db/dataimport?command=full-import
 
-* To import data from an RSS feed, connect to:
+* To import data from an ATOM feed, connect to:
 
-  http://localhost:8983/solr/rss/dataimport?command=full-import
+  http://localhost:8983/solr/atom/dataimport?command=full-import
 
 * To import data from your IMAP server:
 
@@ -45,6 +45,5 @@ When Solr is started connect to:
 
   http://localhost:8983/solr/tika/dataimport?command=full-import
 
-See also README.txt in the solr subdirectory, and check
-http://wiki.apache.org/solr/DataImportHandler for detailed
-usage guide and tutorial.
+Check also the Solr Reference Guide for detailed usage guide:
+https://cwiki.apache.org/confluence/display/solr/Uploading+Structured+Data+Store+Data+with+the+Data+Import+Handler

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/atom/conf/atom-data-config.xml
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/atom/conf/atom-data-config.xml b/solr/example/example-DIH/solr/atom/conf/atom-data-config.xml
new file mode 100644
index 0000000..53b5060
--- /dev/null
+++ b/solr/example/example-DIH/solr/atom/conf/atom-data-config.xml
@@ -0,0 +1,35 @@
+<dataConfig>
+  <dataSource type="URLDataSource"/>
+  <document>
+
+    <entity name="stackoverflow"
+            url="http://stackoverflow.com/feeds/tag/solr"
+            processor="XPathEntityProcessor"
+            forEach="/feed|/feed/entry"
+            transformer="HTMLStripTransformer,RegexTransformer">
+
+      <!-- Pick this value up from the feed level and apply to all documents -->
+      <field column="lastchecked_dt" xpath="/feed/updated" commonField="true"/>
+
+      <!-- Keep only the final numeric part of the URL -->
+      <field column="id" xpath="/feed/entry/id" regex=".*/" replaceWith=""/>
+
+      <field column="title"    xpath="/feed/entry/title"/>
+      <field column="author"   xpath="/feed/entry/author/name"/>
+      <field column="category" xpath="/feed/entry/category/@term"/>
+      <field column="link"     xpath="/feed/entry/link[@rel='alternate']/@href"/>
+
+      <!-- Use transformers to convert HTML into plain text.
+        There is also an UpdateRequestProcess to trim remaining spaces.
+      -->
+      <field column="summary" xpath="/feed/entry/summary" stripHTML="true" regex="( |\n)+" replaceWith=" "/>
+
+      <!-- Ignore namespaces when matching XPath -->
+      <field column="rank" xpath="/feed/entry/rank"/>
+
+      <field column="published_dt" xpath="/feed/entry/published"/>
+      <field column="updated_dt" xpath="/feed/entry/updated"/>
+    </entity>
+
+  </document>
+</dataConfig>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/atom/conf/lang/stopwords_en.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/atom/conf/lang/stopwords_en.txt b/solr/example/example-DIH/solr/atom/conf/lang/stopwords_en.txt
new file mode 100644
index 0000000..2c164c0
--- /dev/null
+++ b/solr/example/example-DIH/solr/atom/conf/lang/stopwords_en.txt
@@ -0,0 +1,54 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# a couple of test stopwords to test that the words are really being
+# configured from this file:
+stopworda
+stopwordb
+
+# Standard english stop words taken from Lucene's StopAnalyzer
+a
+an
+and
+are
+as
+at
+be
+but
+by
+for
+if
+in
+into
+is
+it
+no
+not
+of
+on
+or
+such
+that
+the
+their
+then
+there
+these
+they
+this
+to
+was
+will
+with

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/atom/conf/managed-schema
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/atom/conf/managed-schema b/solr/example/example-DIH/solr/atom/conf/managed-schema
new file mode 100644
index 0000000..5875152
--- /dev/null
+++ b/solr/example/example-DIH/solr/atom/conf/managed-schema
@@ -0,0 +1,106 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<schema name="example-DIH-atom" version="1.6">
+  <uniqueKey>id</uniqueKey>
+
+  <field name="id" type="string" indexed="true" stored="true" required="true"/>
+  <field name="title" type="text_en_splitting" indexed="true" stored="true"/>
+  <field name="author" type="string" indexed="true" stored="true"/>
+  <field name="category" type="string" indexed="true" stored="true" multiValued="true"/>
+  <field name="link" type="string" indexed="true" stored="true"/>
+  <field name="summary" type="text_en_splitting" indexed="true" stored="true"/>
+  <field name="rank" type="pint" indexed="true" stored="true"/>
+
+  <dynamicField name="*_dt" type="pdate" indexed="true" stored="true"/>
+
+  <!-- Catch-all field, aggregating all "useful to search as text" fields via the copyField instructions -->
+  <field name="text" type="text_en_splitting" indexed="true" stored="false" multiValued="true"/>
+
+  <field name="urls" type="url_only" indexed="true" stored="false"/>
+
+
+  <copyField source="id" dest="text"/>
+  <copyField source="title" dest="text"/>
+  <copyField source="author" dest="text"/>
+  <copyField source="category" dest="text"/>
+  <copyField source="summary" dest="text"/>
+
+  <!-- extract URLs from summary for faceting -->
+  <copyField source="summary" dest="urls"/>
+
+  <fieldType name="string" class="solr.StrField" sortMissingLast="true" docValues="true"/>
+  <fieldType name="pint" class="solr.IntPointField" docValues="true"/>
+  <fieldType name="pdate" class="solr.DatePointField" docValues="true"/>
+
+
+  <!-- A text field with defaults appropriate for English, plus
+   aggressive word-splitting and autophrase features enabled.
+   This field is just like text_en, except it adds
+   WordDelimiterFilter to enable splitting and matching of
+   words on case-change, alpha numeric boundaries, and
+   non-alphanumeric chars.  This means certain compound word
+   cases will work, for example query "wi fi" will match
+   document "WiFi" or "wi-fi".
+  -->
+  <fieldType name="text_en_splitting" class="solr.TextField"
+             positionIncrementGap="100" autoGeneratePhraseQueries="true">
+    <analyzer type="index">
+      <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+      <!-- in this example, we will only use synonyms at query time
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+      -->
+      <!-- Case insensitive stop word removal. -->
+      <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1"
+              catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
+      <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
+      <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+      <filter class="solr.StopFilterFactory"
+              ignoreCase="true"
+              words="lang/stopwords_en.txt"
+      />
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1"
+              catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
+      <filter class="solr.PorterStemFilterFactory"/>
+    </analyzer>
+  </fieldType>
+
+  <!-- Field type that extracts URLs from the text.
+   As the stored representation is not changed, it is only useful for faceting.
+   It is not terribly useful for searching URLs either, as there are too many special symbols.
+  -->
+  <fieldType name="url_only" class="solr.TextField" positionIncrementGap="100">
+    <analyzer type="index">
+      <tokenizer class="solr.UAX29URLEmailTokenizerFactory" maxTokenLength="255"/>
+      <filter class="solr.TypeTokenFilterFactory" types="url_types.txt" useWhitelist="true"/>
+    </analyzer>
+    <analyzer type="query">
+      <tokenizer class="solr.KeywordTokenizerFactory"/>
+    </analyzer>
+  </fieldType>
+
+</schema>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/atom/conf/protwords.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/atom/conf/protwords.txt b/solr/example/example-DIH/solr/atom/conf/protwords.txt
new file mode 100644
index 0000000..1303e42
--- /dev/null
+++ b/solr/example/example-DIH/solr/atom/conf/protwords.txt
@@ -0,0 +1,17 @@
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#-----------------------------------------------------------------------
+# Use a protected word file to protect against the stemmer reducing two
+# unrelated words to the same base word.
+
+lucene

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/atom/conf/solrconfig.xml
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/atom/conf/solrconfig.xml b/solr/example/example-DIH/solr/atom/conf/solrconfig.xml
new file mode 100644
index 0000000..22005dd
--- /dev/null
+++ b/solr/example/example-DIH/solr/atom/conf/solrconfig.xml
@@ -0,0 +1,61 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- 
+ This is a DEMO configuration, highlighting elements 
+ specifically needed to get this example running
+ such as libraries and request handler specifics.
+
+ It uses defaults or does not define most of production-level settings
+ such as various caches or auto-commit policies.
+
+ See Solr Reference Guide and other examples for 
+ more details on a well configured solrconfig.xml
+ https://cwiki.apache.org/confluence/display/solr/The+Well-Configured+Solr+Instance
+-->
+<config>
+
+  <!-- Controls what version of Lucene various components of Solr
+    adhere to.  Generally, you want to use the latest version to
+    get all bug fixes and improvements. It is highly recommended
+    that you fully re-index after changing this setting as it can
+    affect both how text is indexed and queried.
+  -->
+  <luceneMatchVersion>7.0.0</luceneMatchVersion>
+
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-dataimporthandler-.*\.jar"/>
+
+  <requestHandler name="/select" class="solr.SearchHandler">
+    <lst name="defaults">
+      <str name="echoParams">explicit</str>
+      <str name="df">text</str>
+    </lst>
+  </requestHandler>
+
+  <requestHandler name="/dataimport" class="solr.DataImportHandler">
+    <lst name="defaults">
+      <str name="config">atom-data-config.xml</str>
+      <str name="processor">trim_text</str>
+    </lst>
+  </requestHandler>
+
+  <updateProcessor class="solr.processor.TrimFieldUpdateProcessorFactory" name="trim_text">
+    <str name="typeName">text_en_splitting</str>
+  </updateProcessor>
+
+</config>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/atom/conf/synonyms.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/atom/conf/synonyms.txt b/solr/example/example-DIH/solr/atom/conf/synonyms.txt
new file mode 100644
index 0000000..eab4ee8
--- /dev/null
+++ b/solr/example/example-DIH/solr/atom/conf/synonyms.txt
@@ -0,0 +1,29 @@
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#-----------------------------------------------------------------------
+#some test synonym mappings unlikely to appear in real input text
+aaafoo => aaabar
+bbbfoo => bbbfoo bbbbar
+cccfoo => cccbar cccbaz
+fooaaa,baraaa,bazaaa
+
+# Some synonym groups specific to this example
+GB,gib,gigabyte,gigabytes
+MB,mib,megabyte,megabytes
+Television, Televisions, TV, TVs
+#notice we use "gib" instead of "GiB" so any WordDelimiterGraphFilter coming
+#after us won't split it into two words.
+
+# Synonym mappings can be used for spelling correction too
+pixima => pixma
+

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/atom/conf/url_types.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/atom/conf/url_types.txt b/solr/example/example-DIH/solr/atom/conf/url_types.txt
new file mode 100644
index 0000000..808f313
--- /dev/null
+++ b/solr/example/example-DIH/solr/atom/conf/url_types.txt
@@ -0,0 +1 @@
+<URL>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/atom/core.properties
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/atom/core.properties b/solr/example/example-DIH/solr/atom/core.properties
new file mode 100644
index 0000000..e69de29

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/admin-extra.html
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/admin-extra.html b/solr/example/example-DIH/solr/rss/conf/admin-extra.html
deleted file mode 100644
index fecab20..0000000
--- a/solr/example/example-DIH/solr/rss/conf/admin-extra.html
+++ /dev/null
@@ -1,24 +0,0 @@
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<!-- The content of this page will be statically included into the top-
-right box of the cores overview page. Uncomment this as an example to 
-see there the content will show up.
-
-<img src="img/ico/construction.png"> This line will appear at the top-
-right box on collection1's Overview
--->

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/admin-extra.menu-bottom.html
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/admin-extra.menu-bottom.html b/solr/example/example-DIH/solr/rss/conf/admin-extra.menu-bottom.html
deleted file mode 100644
index 3359a46..0000000
--- a/solr/example/example-DIH/solr/rss/conf/admin-extra.menu-bottom.html
+++ /dev/null
@@ -1,25 +0,0 @@
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<!-- admin-extra.menu-bottom.html -->
-<!--
-<li>
-  <a href="#" style="background-image: url(img/ico/construction.png);">
-    LAST ITEM
-  </a>
-</li>
--->

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/admin-extra.menu-top.html
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/admin-extra.menu-top.html b/solr/example/example-DIH/solr/rss/conf/admin-extra.menu-top.html
deleted file mode 100644
index 0886cee..0000000
--- a/solr/example/example-DIH/solr/rss/conf/admin-extra.menu-top.html
+++ /dev/null
@@ -1,25 +0,0 @@
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<!-- admin-extra.menu-top.html -->
-<!--
-<li>
-  <a href="#" style="background-image: url(img/ico/construction.png);">
-    FIRST ITEM
-  </a>
-</li>
--->

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/clustering/carrot2/kmeans-attributes.xml
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/clustering/carrot2/kmeans-attributes.xml b/solr/example/example-DIH/solr/rss/conf/clustering/carrot2/kmeans-attributes.xml
deleted file mode 100644
index d802465..0000000
--- a/solr/example/example-DIH/solr/rss/conf/clustering/carrot2/kmeans-attributes.xml
+++ /dev/null
@@ -1,19 +0,0 @@
-<!-- 
-  Default configuration for the bisecting k-means clustering algorithm.
-  
-  This file can be loaded (and saved) by Carrot2 Workbench.
-  http://project.carrot2.org/download.html
--->
-<attribute-sets default="attributes">
-    <attribute-set id="attributes">
-      <value-set>
-        <label>attributes</label>
-          <attribute key="MultilingualClustering.defaultLanguage">
-            <value type="org.carrot2.core.LanguageCode" value="ENGLISH"/>
-          </attribute>
-          <attribute key="MultilingualClustering.languageAggregationStrategy">
-            <value type="org.carrot2.text.clustering.MultilingualClustering$LanguageAggregationStrategy" value="FLATTEN_MAJOR_LANGUAGE"/>
-          </attribute>
-      </value-set>
-  </attribute-set>
-</attribute-sets>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/clustering/carrot2/lingo-attributes.xml
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/clustering/carrot2/lingo-attributes.xml b/solr/example/example-DIH/solr/rss/conf/clustering/carrot2/lingo-attributes.xml
deleted file mode 100644
index 4bf1360..0000000
--- a/solr/example/example-DIH/solr/rss/conf/clustering/carrot2/lingo-attributes.xml
+++ /dev/null
@@ -1,24 +0,0 @@
-<!-- 
-  Default configuration for the Lingo clustering algorithm.
-
-  This file can be loaded (and saved) by Carrot2 Workbench.
-  http://project.carrot2.org/download.html
--->
-<attribute-sets default="attributes">
-    <attribute-set id="attributes">
-      <value-set>
-        <label>attributes</label>
-          <!-- 
-          The language to assume for clustered documents.
-          For a list of allowed values, see: 
-          http://download.carrot2.org/stable/manual/#section.attribute.lingo.MultilingualClustering.defaultLanguage
-          -->
-          <attribute key="MultilingualClustering.defaultLanguage">
-            <value type="org.carrot2.core.LanguageCode" value="ENGLISH"/>
-          </attribute>
-          <attribute key="LingoClusteringAlgorithm.desiredClusterCountBase">
-            <value type="java.lang.Integer" value="20"/>
-          </attribute>
-      </value-set>
-  </attribute-set>
-</attribute-sets>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/clustering/carrot2/stc-attributes.xml
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/clustering/carrot2/stc-attributes.xml b/solr/example/example-DIH/solr/rss/conf/clustering/carrot2/stc-attributes.xml
deleted file mode 100644
index c1bf110..0000000
--- a/solr/example/example-DIH/solr/rss/conf/clustering/carrot2/stc-attributes.xml
+++ /dev/null
@@ -1,19 +0,0 @@
-<!-- 
-  Default configuration for the STC clustering algorithm.
-
-  This file can be loaded (and saved) by Carrot2 Workbench.
-  http://project.carrot2.org/download.html
--->
-<attribute-sets default="attributes">
-    <attribute-set id="attributes">
-      <value-set>
-        <label>attributes</label>
-          <attribute key="MultilingualClustering.defaultLanguage">
-            <value type="org.carrot2.core.LanguageCode" value="ENGLISH"/>
-          </attribute>
-          <attribute key="MultilingualClustering.languageAggregationStrategy">
-            <value type="org.carrot2.text.clustering.MultilingualClustering$LanguageAggregationStrategy" value="FLATTEN_MAJOR_LANGUAGE"/>
-          </attribute>
-      </value-set>
-  </attribute-set>
-</attribute-sets>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/currency.xml
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/currency.xml b/solr/example/example-DIH/solr/rss/conf/currency.xml
deleted file mode 100644
index 3a9c58a..0000000
--- a/solr/example/example-DIH/solr/rss/conf/currency.xml
+++ /dev/null
@@ -1,67 +0,0 @@
-<?xml version="1.0" ?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<!-- Example exchange rates file for CurrencyField type named "currency" in example schema -->
-
-<currencyConfig version="1.0">
-  <rates>
-    <!-- Updated from http://www.exchangerate.com/ at 2011-09-27 -->
-    <rate from="USD" to="ARS" rate="4.333871" comment="ARGENTINA Peso" />
-    <rate from="USD" to="AUD" rate="1.025768" comment="AUSTRALIA Dollar" />
-    <rate from="USD" to="EUR" rate="0.743676" comment="European Euro" />
-    <rate from="USD" to="BRL" rate="1.881093" comment="BRAZIL Real" />
-    <rate from="USD" to="CAD" rate="1.030815" comment="CANADA Dollar" />
-    <rate from="USD" to="CLP" rate="519.0996" comment="CHILE Peso" />
-    <rate from="USD" to="CNY" rate="6.387310" comment="CHINA Yuan" />
-    <rate from="USD" to="CZK" rate="18.47134" comment="CZECH REP. Koruna" />
-    <rate from="USD" to="DKK" rate="5.515436" comment="DENMARK Krone" />
-    <rate from="USD" to="HKD" rate="7.801922" comment="HONG KONG Dollar" />
-    <rate from="USD" to="HUF" rate="215.6169" comment="HUNGARY Forint" />
-    <rate from="USD" to="ISK" rate="118.1280" comment="ICELAND Krona" />
-    <rate from="USD" to="INR" rate="49.49088" comment="INDIA Rupee" />
-    <rate from="USD" to="XDR" rate="0.641358" comment="INTNL MON. FUND SDR" />
-    <rate from="USD" to="ILS" rate="3.709739" comment="ISRAEL Sheqel" />
-    <rate from="USD" to="JPY" rate="76.32419" comment="JAPAN Yen" />
-    <rate from="USD" to="KRW" rate="1169.173" comment="KOREA (SOUTH) Won" />
-    <rate from="USD" to="KWD" rate="0.275142" comment="KUWAIT Dinar" />
-    <rate from="USD" to="MXN" rate="13.85895" comment="MEXICO Peso" />
-    <rate from="USD" to="NZD" rate="1.285159" comment="NEW ZEALAND Dollar" />
-    <rate from="USD" to="NOK" rate="5.859035" comment="NORWAY Krone" />
-    <rate from="USD" to="PKR" rate="87.57007" comment="PAKISTAN Rupee" />
-    <rate from="USD" to="PEN" rate="2.730683" comment="PERU Sol" />
-    <rate from="USD" to="PHP" rate="43.62039" comment="PHILIPPINES Peso" />
-    <rate from="USD" to="PLN" rate="3.310139" comment="POLAND Zloty" />
-    <rate from="USD" to="RON" rate="3.100932" comment="ROMANIA Leu" />
-    <rate from="USD" to="RUB" rate="32.14663" comment="RUSSIA Ruble" />
-    <rate from="USD" to="SAR" rate="3.750465" comment="SAUDI ARABIA Riyal" />
-    <rate from="USD" to="SGD" rate="1.299352" comment="SINGAPORE Dollar" />
-    <rate from="USD" to="ZAR" rate="8.329761" comment="SOUTH AFRICA Rand" />
-    <rate from="USD" to="SEK" rate="6.883442" comment="SWEDEN Krona" />
-    <rate from="USD" to="CHF" rate="0.906035" comment="SWITZERLAND Franc" />
-    <rate from="USD" to="TWD" rate="30.40283" comment="TAIWAN Dollar" />
-    <rate from="USD" to="THB" rate="30.89487" comment="THAILAND Baht" />
-    <rate from="USD" to="AED" rate="3.672955" comment="U.A.E. Dirham" />
-    <rate from="USD" to="UAH" rate="7.988582" comment="UKRAINE Hryvnia" />
-    <rate from="USD" to="GBP" rate="0.647910" comment="UNITED KINGDOM Pound" />
-    
-    <!-- Cross-rates for some common currencies -->
-    <rate from="EUR" to="GBP" rate="0.869914" />  
-    <rate from="EUR" to="NOK" rate="7.800095" />  
-    <rate from="GBP" to="NOK" rate="8.966508" />  
-  </rates>
-</currencyConfig>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/elevate.xml
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/elevate.xml b/solr/example/example-DIH/solr/rss/conf/elevate.xml
deleted file mode 100644
index 2c09ebe..0000000
--- a/solr/example/example-DIH/solr/rss/conf/elevate.xml
+++ /dev/null
@@ -1,42 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" ?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<!-- If this file is found in the config directory, it will only be
-     loaded once at startup.  If it is found in Solr's data
-     directory, it will be re-loaded every commit.
-
-   See http://wiki.apache.org/solr/QueryElevationComponent for more info
-
--->
-<elevate>
- <!-- Query elevation examples
-  <query text="foo bar">
-    <doc id="1" />
-    <doc id="2" />
-    <doc id="3" />
-  </query>
-
-for use with techproducts example
- 
-  <query text="ipod">
-    <doc id="MA147LL/A" />  put the actual ipod at the top 
-    <doc id="IW-02" exclude="true" /> exclude this cable
-  </query>
--->
-
-</elevate>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/contractions_ca.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/contractions_ca.txt b/solr/example/example-DIH/solr/rss/conf/lang/contractions_ca.txt
deleted file mode 100644
index 307a85f..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/contractions_ca.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-# Set of Catalan contractions for ElisionFilter
-# TODO: load this as a resource from the analyzer and sync it in build.xml
-d
-l
-m
-n
-s
-t

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/contractions_fr.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/contractions_fr.txt b/solr/example/example-DIH/solr/rss/conf/lang/contractions_fr.txt
deleted file mode 100644
index f1bba51..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/contractions_fr.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-# Set of French contractions for ElisionFilter
-# TODO: load this as a resource from the analyzer and sync it in build.xml
-l
-m
-t
-qu
-n
-s
-j
-d
-c
-jusqu
-quoiqu
-lorsqu
-puisqu

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/contractions_ga.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/contractions_ga.txt b/solr/example/example-DIH/solr/rss/conf/lang/contractions_ga.txt
deleted file mode 100644
index 9ebe7fa..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/contractions_ga.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-# Set of Irish contractions for ElisionFilter
-# TODO: load this as a resource from the analyzer and sync it in build.xml
-d
-m
-b

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/contractions_it.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/contractions_it.txt b/solr/example/example-DIH/solr/rss/conf/lang/contractions_it.txt
deleted file mode 100644
index cac0409..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/contractions_it.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-# Set of Italian contractions for ElisionFilter
-# TODO: load this as a resource from the analyzer and sync it in build.xml
-c
-l 
-all 
-dall 
-dell 
-nell 
-sull 
-coll 
-pell 
-gl 
-agl 
-dagl 
-degl 
-negl 
-sugl 
-un 
-m 
-t 
-s 
-v 
-d

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/hyphenations_ga.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/hyphenations_ga.txt b/solr/example/example-DIH/solr/rss/conf/lang/hyphenations_ga.txt
deleted file mode 100644
index 4d2642c..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/hyphenations_ga.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-# Set of Irish hyphenations for StopFilter
-# TODO: load this as a resource from the analyzer and sync it in build.xml
-h
-n
-t

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stemdict_nl.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stemdict_nl.txt b/solr/example/example-DIH/solr/rss/conf/lang/stemdict_nl.txt
deleted file mode 100644
index 4410729..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stemdict_nl.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-# Set of overrides for the dutch stemmer
-# TODO: load this as a resource from the analyzer and sync it in build.xml
-fiets	fiets
-bromfiets	bromfiets
-ei	eier
-kind	kinder

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stoptags_ja.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stoptags_ja.txt b/solr/example/example-DIH/solr/rss/conf/lang/stoptags_ja.txt
deleted file mode 100644
index 71b7508..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stoptags_ja.txt
+++ /dev/null
@@ -1,420 +0,0 @@
-#
-# This file defines a Japanese stoptag set for JapanesePartOfSpeechStopFilter.
-#
-# Any token with a part-of-speech tag that exactly matches those defined in this
-# file are removed from the token stream.
-#
-# Set your own stoptags by uncommenting the lines below.  Note that comments are
-# not allowed on the same line as a stoptag.  See LUCENE-3745 for frequency lists,
-# etc. that can be useful for building you own stoptag set.
-#
-# The entire possible tagset is provided below for convenience.
-#
-#####
-#  noun: unclassified nouns
-#\u540d\u8a5e
-#
-#  noun-common: Common nouns or nouns where the sub-classification is undefined
-#\u540d\u8a5e-\u4e00\u822c
-#
-#  noun-proper: Proper nouns where the sub-classification is undefined 
-#\u540d\u8a5e-\u56fa\u6709\u540d\u8a5e
-#
-#  noun-proper-misc: miscellaneous proper nouns
-#\u540d\u8a5e-\u56fa\u6709\u540d\u8a5e-\u4e00\u822c
-#
-#  noun-proper-person: Personal names where the sub-classification is undefined
-#\u540d\u8a5e-\u56fa\u6709\u540d\u8a5e-\u4eba\u540d
-#
-#  noun-proper-person-misc: names that cannot be divided into surname and 
-#  given name; foreign names; names where the surname or given name is unknown.
-#  e.g. \u304a\u5e02\u306e\u65b9
-#\u540d\u8a5e-\u56fa\u6709\u540d\u8a5e-\u4eba\u540d-\u4e00\u822c
-#
-#  noun-proper-person-surname: Mainly Japanese surnames.
-#  e.g. \u5c71\u7530
-#\u540d\u8a5e-\u56fa\u6709\u540d\u8a5e-\u4eba\u540d-\u59d3
-#
-#  noun-proper-person-given_name: Mainly Japanese given names.
-#  e.g. \u592a\u90ce
-#\u540d\u8a5e-\u56fa\u6709\u540d\u8a5e-\u4eba\u540d-\u540d
-#
-#  noun-proper-organization: Names representing organizations.
-#  e.g. \u901a\u7523\u7701, NHK
-#\u540d\u8a5e-\u56fa\u6709\u540d\u8a5e-\u7d44\u7e54
-#
-#  noun-proper-place: Place names where the sub-classification is undefined
-#\u540d\u8a5e-\u56fa\u6709\u540d\u8a5e-\u5730\u57df
-#
-#  noun-proper-place-misc: Place names excluding countries.
-#  e.g. \u30a2\u30b8\u30a2, \u30d0\u30eb\u30bb\u30ed\u30ca, \u4eac\u90fd
-#\u540d\u8a5e-\u56fa\u6709\u540d\u8a5e-\u5730\u57df-\u4e00\u822c
-#
-#  noun-proper-place-country: Country names. 
-#  e.g. \u65e5\u672c, \u30aa\u30fc\u30b9\u30c8\u30e9\u30ea\u30a2
-#\u540d\u8a5e-\u56fa\u6709\u540d\u8a5e-\u5730\u57df-\u56fd
-#
-#  noun-pronoun: Pronouns where the sub-classification is undefined
-#\u540d\u8a5e-\u4ee3\u540d\u8a5e
-#
-#  noun-pronoun-misc: miscellaneous pronouns: 
-#  e.g. \u305d\u308c, \u3053\u3053, \u3042\u3044\u3064, \u3042\u306a\u305f, \u3042\u3061\u3053\u3061, \u3044\u304f\u3064, \u3069\u3053\u304b, \u306a\u306b, \u307f\u306a\u3055\u3093, \u307f\u3093\u306a, \u308f\u305f\u304f\u3057, \u308f\u308c\u308f\u308c
-#\u540d\u8a5e-\u4ee3\u540d\u8a5e-\u4e00\u822c
-#
-#  noun-pronoun-contraction: Spoken language contraction made by combining a 
-#  pronoun and the particle 'wa'.
-#  e.g. \u3042\u308a\u3083, \u3053\u308a\u3083, \u3053\u308a\u3083\u3042, \u305d\u308a\u3083, \u305d\u308a\u3083\u3042 
-#\u540d\u8a5e-\u4ee3\u540d\u8a5e-\u7e2e\u7d04
-#
-#  noun-adverbial: Temporal nouns such as names of days or months that behave 
-#  like adverbs. Nouns that represent amount or ratios and can be used adverbially,
-#  e.g. \u91d1\u66dc, \u4e00\u6708, \u5348\u5f8c, \u5c11\u91cf
-#\u540d\u8a5e-\u526f\u8a5e\u53ef\u80fd
-#
-#  noun-verbal: Nouns that take arguments with case and can appear followed by 
-#  'suru' and related verbs (\u3059\u308b, \u3067\u304d\u308b, \u306a\u3055\u308b, \u304f\u3060\u3055\u308b)
-#  e.g. \u30a4\u30f3\u30d7\u30c3\u30c8, \u611b\u7740, \u60aa\u5316, \u60aa\u6226\u82e6\u95d8, \u4e00\u5b89\u5fc3, \u4e0b\u53d6\u308a
-#\u540d\u8a5e-\u30b5\u5909\u63a5\u7d9a
-#
-#  noun-adjective-base: The base form of adjectives, words that appear before \u306a ("na")
-#  e.g. \u5065\u5eb7, \u5b89\u6613, \u99c4\u76ee, \u3060\u3081
-#\u540d\u8a5e-\u5f62\u5bb9\u52d5\u8a5e\u8a9e\u5e79
-#
-#  noun-numeric: Arabic numbers, Chinese numerals, and counters like \u4f55 (\u56de), \u6570.
-#  e.g. 0, 1, 2, \u4f55, \u6570, \u5e7e
-#\u540d\u8a5e-\u6570
-#
-#  noun-affix: noun affixes where the sub-classification is undefined
-#\u540d\u8a5e-\u975e\u81ea\u7acb
-#
-#  noun-affix-misc: Of adnominalizers, the case-marker \u306e ("no"), and words that 
-#  attach to the base form of inflectional words, words that cannot be classified 
-#  into any of the other categories below. This category includes indefinite nouns.
-#  e.g. \u3042\u304b\u3064\u304d, \u6681, \u304b\u3044, \u7532\u6590, \u6c17, \u304d\u3089\u3044, \u5acc\u3044, \u304f\u305b, \u7656, \u3053\u3068, \u4e8b, \u3054\u3068, \u6bce, \u3057\u3060\u3044, \u6b21\u7b2c, 
-#       \u9806, \u305b\u3044, \u6240\u70ba, \u3064\u3044\u3067, \u5e8f\u3067, \u3064\u3082\u308a, \u7a4d\u3082\u308a, \u70b9, \u3069\u3053\u308d, \u306e, \u306f\u305a, \u7b48, \u306f\u305a\u307f, \u5f3e\u307f, 
-#       \u62cd\u5b50, \u3075\u3046, \u3075\u308a, \u632f\u308a, \u307b\u3046, \u65b9, \u65e8, \u3082\u306e, \u7269, \u8005, \u3086\u3048, \u6545, \u3086\u3048\u3093, \u6240\u4ee5, \u308f\u3051, \u8a33,
-#       \u308f\u308a, \u5272\u308a, \u5272, \u3093-\u53e3\u8a9e/, \u3082\u3093-\u53e3\u8a9e/
-#\u540d\u8a5e-\u975e\u81ea\u7acb-\u4e00\u822c
-#
-#  noun-affix-adverbial: noun affixes that that can behave as adverbs.
-#  e.g. \u3042\u3044\u3060, \u9593, \u3042\u3052\u304f, \u6319\u3052\u53e5, \u3042\u3068, \u5f8c, \u4f59\u308a, \u4ee5\u5916, \u4ee5\u964d, \u4ee5\u5f8c, \u4ee5\u4e0a, \u4ee5\u524d, \u4e00\u65b9, \u3046\u3048, 
-#       \u4e0a, \u3046\u3061, \u5185, \u304a\u308a, \u6298\u308a, \u304b\u304e\u308a, \u9650\u308a, \u304d\u308a, \u3063\u304d\u308a, \u7d50\u679c, \u3053\u308d, \u9803, \u3055\u3044, \u969b, \u6700\u4e2d, \u3055\u306a\u304b, 
-#       \u6700\u4e2d, \u3058\u305f\u3044, \u81ea\u4f53, \u305f\u3073, \u5ea6, \u305f\u3081, \u70ba, \u3064\u3069, \u90fd\u5ea6, \u3068\u304a\u308a, \u901a\u308a, \u3068\u304d, \u6642, \u3068\u3053\u308d, \u6240, 
-#       \u3068\u305f\u3093, \u9014\u7aef, \u306a\u304b, \u4e2d, \u306e\u3061, \u5f8c, \u3070\u3042\u3044, \u5834\u5408, \u65e5, \u3076\u3093, \u5206, \u307b\u304b, \u4ed6, \u307e\u3048, \u524d, \u307e\u307e, 
-#       \u5118, \u4fad, \u307f\u304e\u308a, \u77e2\u5148
-#\u540d\u8a5e-\u975e\u81ea\u7acb-\u526f\u8a5e\u53ef\u80fd
-#
-#  noun-affix-aux: noun affixes treated as \u52a9\u52d5\u8a5e ("auxiliary verb") in school grammars 
-#  with the stem \u3088\u3046(\u3060) ("you(da)").
-#  e.g.  \u3088\u3046, \u3084\u3046, \u69d8 (\u3088\u3046)
-#\u540d\u8a5e-\u975e\u81ea\u7acb-\u52a9\u52d5\u8a5e\u8a9e\u5e79
-#  
-#  noun-affix-adjective-base: noun affixes that can connect to the indeclinable
-#  connection form \u306a (aux "da").
-#  e.g. \u307f\u305f\u3044, \u3075\u3046
-#\u540d\u8a5e-\u975e\u81ea\u7acb-\u5f62\u5bb9\u52d5\u8a5e\u8a9e\u5e79
-#
-#  noun-special: special nouns where the sub-classification is undefined.
-#\u540d\u8a5e-\u7279\u6b8a
-#
-#  noun-special-aux: The \u305d\u3046\u3060 ("souda") stem form that is used for reporting news, is 
-#  treated as \u52a9\u52d5\u8a5e ("auxiliary verb") in school grammars, and attach to the base 
-#  form of inflectional words.
-#  e.g. \u305d\u3046
-#\u540d\u8a5e-\u7279\u6b8a-\u52a9\u52d5\u8a5e\u8a9e\u5e79
-#
-#  noun-suffix: noun suffixes where the sub-classification is undefined.
-#\u540d\u8a5e-\u63a5\u5c3e
-#
-#  noun-suffix-misc: Of the nouns or stem forms of other parts of speech that connect 
-#  to \u30ac\u30eb or \u30bf\u30a4 and can combine into compound nouns, words that cannot be classified into
-#  any of the other categories below. In general, this category is more inclusive than 
-#  \u63a5\u5c3e\u8a9e ("suffix") and is usually the last element in a compound noun.
-#  e.g. \u304a\u304d, \u304b\u305f, \u65b9, \u7532\u6590 (\u304c\u3044), \u304c\u304b\u308a, \u304e\u307f, \u6c17\u5473, \u3050\u308b\u307f, (\uff5e\u3057\u305f) \u3055, \u6b21\u7b2c, \u6e08 (\u305a) \u307f,
-#       \u3088\u3046, (\u3067\u304d)\u3063\u3053, \u611f, \u89b3, \u6027, \u5b66, \u985e, \u9762, \u7528
-#\u540d\u8a5e-\u63a5\u5c3e-\u4e00\u822c
-#
-#  noun-suffix-person: Suffixes that form nouns and attach to person names more often
-#  than other nouns.
-#  e.g. \u541b, \u69d8, \u8457
-#\u540d\u8a5e-\u63a5\u5c3e-\u4eba\u540d
-#
-#  noun-suffix-place: Suffixes that form nouns and attach to place names more often 
-#  than other nouns.
-#  e.g. \u753a, \u5e02, \u770c
-#\u540d\u8a5e-\u63a5\u5c3e-\u5730\u57df
-#
-#  noun-suffix-verbal: Of the suffixes that attach to nouns and form nouns, those that 
-#  can appear before \u30b9\u30eb ("suru").
-#  e.g. \u5316, \u8996, \u5206\u3051, \u5165\u308a, \u843d\u3061, \u8cb7\u3044
-#\u540d\u8a5e-\u63a5\u5c3e-\u30b5\u5909\u63a5\u7d9a
-#
-#  noun-suffix-aux: The stem form of \u305d\u3046\u3060 (\u69d8\u614b) that is used to indicate conditions, 
-#  is treated as \u52a9\u52d5\u8a5e ("auxiliary verb") in school grammars, and attach to the 
-#  conjunctive form of inflectional words.
-#  e.g. \u305d\u3046
-#\u540d\u8a5e-\u63a5\u5c3e-\u52a9\u52d5\u8a5e\u8a9e\u5e79
-#
-#  noun-suffix-adjective-base: Suffixes that attach to other nouns or the conjunctive 
-#  form of inflectional words and appear before the copula \u3060 ("da").
-#  e.g. \u7684, \u3052, \u304c\u3061
-#\u540d\u8a5e-\u63a5\u5c3e-\u5f62\u5bb9\u52d5\u8a5e\u8a9e\u5e79
-#
-#  noun-suffix-adverbial: Suffixes that attach to other nouns and can behave as adverbs.
-#  e.g. \u5f8c (\u3054), \u4ee5\u5f8c, \u4ee5\u964d, \u4ee5\u524d, \u524d\u5f8c, \u4e2d, \u672b, \u4e0a, \u6642 (\u3058)
-#\u540d\u8a5e-\u63a5\u5c3e-\u526f\u8a5e\u53ef\u80fd
-#
-#  noun-suffix-classifier: Suffixes that attach to numbers and form nouns. This category 
-#  is more inclusive than \u52a9\u6570\u8a5e ("classifier") and includes common nouns that attach 
-#  to numbers.
-#  e.g. \u500b, \u3064, \u672c, \u518a, \u30d1\u30fc\u30bb\u30f3\u30c8, cm, kg, \u30ab\u6708, \u304b\u56fd, \u533a\u753b, \u6642\u9593, \u6642\u534a
-#\u540d\u8a5e-\u63a5\u5c3e-\u52a9\u6570\u8a5e
-#
-#  noun-suffix-special: Special suffixes that mainly attach to inflecting words.
-#  e.g. (\u697d\u3057) \u3055, (\u8003\u3048) \u65b9
-#\u540d\u8a5e-\u63a5\u5c3e-\u7279\u6b8a
-#
-#  noun-suffix-conjunctive: Nouns that behave like conjunctions and join two words 
-#  together.
-#  e.g. (\u65e5\u672c) \u5bfe (\u30a2\u30e1\u30ea\u30ab), \u5bfe (\u30a2\u30e1\u30ea\u30ab), (3) \u5bfe (5), (\u5973\u512a) \u517c (\u4e3b\u5a66)
-#\u540d\u8a5e-\u63a5\u7d9a\u8a5e\u7684
-#
-#  noun-verbal_aux: Nouns that attach to the conjunctive particle \u3066 ("te") and are 
-#  semantically verb-like.
-#  e.g. \u3054\u3089\u3093, \u3054\u89a7, \u5fa1\u89a7, \u9802\u6234
-#\u540d\u8a5e-\u52d5\u8a5e\u975e\u81ea\u7acb\u7684
-#
-#  noun-quotation: text that cannot be segmented into words, proverbs, Chinese poetry, 
-#  dialects, English, etc. Currently, the only entry for \u540d\u8a5e \u5f15\u7528\u6587\u5b57\u5217 ("noun quotation") 
-#  is \u3044\u308f\u304f ("iwaku").
-#\u540d\u8a5e-\u5f15\u7528\u6587\u5b57\u5217
-#
-#  noun-nai_adjective: Words that appear before the auxiliary verb \u306a\u3044 ("nai") and
-#  behave like an adjective.
-#  e.g. \u7533\u3057\u8a33, \u4ed5\u65b9, \u3068\u3093\u3067\u3082, \u9055\u3044
-#\u540d\u8a5e-\u30ca\u30a4\u5f62\u5bb9\u8a5e\u8a9e\u5e79
-#
-#####
-#  prefix: unclassified prefixes
-#\u63a5\u982d\u8a5e
-#
-#  prefix-nominal: Prefixes that attach to nouns (including adjective stem forms) 
-#  excluding numerical expressions.
-#  e.g. \u304a (\u6c34), \u67d0 (\u6c0f), \u540c (\u793e), \u6545 (\uff5e\u6c0f), \u9ad8 (\u54c1\u8cea), \u304a (\u898b\u4e8b), \u3054 (\u7acb\u6d3e)
-#\u63a5\u982d\u8a5e-\u540d\u8a5e\u63a5\u7d9a
-#
-#  prefix-verbal: Prefixes that attach to the imperative form of a verb or a verb
-#  in conjunctive form followed by \u306a\u308b/\u306a\u3055\u308b/\u304f\u3060\u3055\u308b.
-#  e.g. \u304a (\u8aad\u307f\u306a\u3055\u3044), \u304a (\u5ea7\u308a)
-#\u63a5\u982d\u8a5e-\u52d5\u8a5e\u63a5\u7d9a
-#
-#  prefix-adjectival: Prefixes that attach to adjectives.
-#  e.g. \u304a (\u5bd2\u3044\u3067\u3059\u306d\u3048), \u30d0\u30ab (\u3067\u304b\u3044)
-#\u63a5\u982d\u8a5e-\u5f62\u5bb9\u8a5e\u63a5\u7d9a
-#
-#  prefix-numerical: Prefixes that attach to numerical expressions.
-#  e.g. \u7d04, \u304a\u3088\u305d, \u6bce\u6642
-#\u63a5\u982d\u8a5e-\u6570\u63a5\u7d9a
-#
-#####
-#  verb: unclassified verbs
-#\u52d5\u8a5e
-#
-#  verb-main:
-#\u52d5\u8a5e-\u81ea\u7acb
-#
-#  verb-auxiliary:
-#\u52d5\u8a5e-\u975e\u81ea\u7acb
-#
-#  verb-suffix:
-#\u52d5\u8a5e-\u63a5\u5c3e
-#
-#####
-#  adjective: unclassified adjectives
-#\u5f62\u5bb9\u8a5e
-#
-#  adjective-main:
-#\u5f62\u5bb9\u8a5e-\u81ea\u7acb
-#
-#  adjective-auxiliary:
-#\u5f62\u5bb9\u8a5e-\u975e\u81ea\u7acb
-#
-#  adjective-suffix:
-#\u5f62\u5bb9\u8a5e-\u63a5\u5c3e
-#
-#####
-#  adverb: unclassified adverbs
-#\u526f\u8a5e
-#
-#  adverb-misc: Words that can be segmented into one unit and where adnominal 
-#  modification is not possible.
-#  e.g. \u3042\u3044\u304b\u308f\u3089\u305a, \u591a\u5206
-#\u526f\u8a5e-\u4e00\u822c
-#
-#  adverb-particle_conjunction: Adverbs that can be followed by \u306e, \u306f, \u306b, 
-#  \u306a, \u3059\u308b, \u3060, etc.
-#  e.g. \u3053\u3093\u306a\u306b, \u305d\u3093\u306a\u306b, \u3042\u3093\u306a\u306b, \u306a\u306b\u304b, \u306a\u3093\u3067\u3082
-#\u526f\u8a5e-\u52a9\u8a5e\u985e\u63a5\u7d9a
-#
-#####
-#  adnominal: Words that only have noun-modifying forms.
-#  e.g. \u3053\u306e, \u305d\u306e, \u3042\u306e, \u3069\u306e, \u3044\u308f\u3086\u308b, \u306a\u3093\u3089\u304b\u306e, \u4f55\u3089\u304b\u306e, \u3044\u308d\u3093\u306a, \u3053\u3046\u3044\u3046, \u305d\u3046\u3044\u3046, \u3042\u3042\u3044\u3046, 
-#       \u3069\u3046\u3044\u3046, \u3053\u3093\u306a, \u305d\u3093\u306a, \u3042\u3093\u306a, \u3069\u3093\u306a, \u5927\u304d\u306a, \u5c0f\u3055\u306a, \u304a\u304b\u3057\u306a, \u307b\u3093\u306e, \u305f\u3044\u3057\u305f, 
-#       \u300c(, \u3082) \u3055\u308b (\u3053\u3068\u306a\u304c\u3089)\u300d, \u5fae\u3005\u305f\u308b, \u5802\u3005\u305f\u308b, \u5358\u306a\u308b, \u3044\u304b\u306a\u308b, \u6211\u304c\u300d\u300c\u540c\u3058, \u4ea1\u304d
-#\u9023\u4f53\u8a5e
-#
-#####
-#  conjunction: Conjunctions that can occur independently.
-#  e.g. \u304c, \u3051\u308c\u3069\u3082, \u305d\u3057\u3066, \u3058\u3083\u3042, \u305d\u308c\u3069\u3053\u308d\u304b
-\u63a5\u7d9a\u8a5e
-#
-#####
-#  particle: unclassified particles.
-\u52a9\u8a5e
-#
-#  particle-case: case particles where the subclassification is undefined.
-\u52a9\u8a5e-\u683c\u52a9\u8a5e
-#
-#  particle-case-misc: Case particles.
-#  e.g. \u304b\u3089, \u304c, \u3067, \u3068, \u306b, \u3078, \u3088\u308a, \u3092, \u306e, \u306b\u3066
-\u52a9\u8a5e-\u683c\u52a9\u8a5e-\u4e00\u822c
-#
-#  particle-case-quote: the "to" that appears after nouns, a person\u2019s speech, 
-#  quotation marks, expressions of decisions from a meeting, reasons, judgements,
-#  conjectures, etc.
-#  e.g. ( \u3060) \u3068 (\u8ff0\u3079\u305f.), ( \u3067\u3042\u308b) \u3068 (\u3057\u3066\u57f7\u884c\u7336\u4e88...)
-\u52a9\u8a5e-\u683c\u52a9\u8a5e-\u5f15\u7528
-#
-#  particle-case-compound: Compounds of particles and verbs that mainly behave 
-#  like case particles.
-#  e.g. \u3068\u3044\u3046, \u3068\u3044\u3063\u305f, \u3068\u304b\u3044\u3046, \u3068\u3057\u3066, \u3068\u3068\u3082\u306b, \u3068\u5171\u306b, \u3067\u3082\u3063\u3066, \u306b\u3042\u305f\u3063\u3066, \u306b\u5f53\u305f\u3063\u3066, \u306b\u5f53\u3063\u3066,
-#       \u306b\u3042\u305f\u308a, \u306b\u5f53\u305f\u308a, \u306b\u5f53\u308a, \u306b\u5f53\u305f\u308b, \u306b\u3042\u305f\u308b, \u306b\u304a\u3044\u3066, \u306b\u65bc\u3044\u3066,\u306b\u65bc\u3066, \u306b\u304a\u3051\u308b, \u306b\u65bc\u3051\u308b, 
-#       \u306b\u304b\u3051, \u306b\u304b\u3051\u3066, \u306b\u304b\u3093\u3057, \u306b\u95a2\u3057, \u306b\u304b\u3093\u3057\u3066, \u306b\u95a2\u3057\u3066, \u306b\u304b\u3093\u3059\u308b, \u306b\u95a2\u3059\u308b, \u306b\u969b\u3057, 
-#       \u306b\u969b\u3057\u3066, \u306b\u3057\u305f\u304c\u3044, \u306b\u5f93\u3044, \u306b\u5f93\u3046, \u306b\u3057\u305f\u304c\u3063\u3066, \u306b\u5f93\u3063\u3066, \u306b\u305f\u3044\u3057, \u306b\u5bfe\u3057, \u306b\u305f\u3044\u3057\u3066, 
-#       \u306b\u5bfe\u3057\u3066, \u306b\u305f\u3044\u3059\u308b, \u306b\u5bfe\u3059\u308b, \u306b\u3064\u3044\u3066, \u306b\u3064\u304d, \u306b\u3064\u3051, \u306b\u3064\u3051\u3066, \u306b\u3064\u308c, \u306b\u3064\u308c\u3066, \u306b\u3068\u3063\u3066,
-#       \u306b\u3068\u308a, \u306b\u307e\u3064\u308f\u308b, \u306b\u3088\u3063\u3066, \u306b\u4f9d\u3063\u3066, \u306b\u56e0\u3063\u3066, \u306b\u3088\u308a, \u306b\u4f9d\u308a, \u306b\u56e0\u308a, \u306b\u3088\u308b, \u306b\u4f9d\u308b, \u306b\u56e0\u308b, 
-#       \u306b\u308f\u305f\u3063\u3066, \u306b\u308f\u305f\u308b, \u3092\u3082\u3063\u3066, \u3092\u4ee5\u3063\u3066, \u3092\u901a\u3058, \u3092\u901a\u3058\u3066, \u3092\u901a\u3057\u3066, \u3092\u3081\u3050\u3063\u3066, \u3092\u3081\u3050\u308a, \u3092\u3081\u3050\u308b,
-#       \u3063\u3066-\u53e3\u8a9e/, \u3061\u3085\u3046-\u95a2\u897f\u5f01\u300c\u3068\u3044\u3046\u300d/, (\u4f55) \u3066\u3044\u3046 (\u4eba)-\u53e3\u8a9e/, \u3063\u3066\u3044\u3046-\u53e3\u8a9e/, \u3068\u3044\u3075, \u3068\u304b\u3044\u3075
-\u52a9\u8a5e-\u683c\u52a9\u8a5e-\u9023\u8a9e
-#
-#  particle-conjunctive:
-#  e.g. \u304b\u3089, \u304b\u3089\u306b\u306f, \u304c, \u3051\u308c\u3069, \u3051\u308c\u3069\u3082, \u3051\u3069, \u3057, \u3064\u3064, \u3066, \u3067, \u3068, \u3068\u3053\u308d\u304c, \u3069\u3053\u308d\u304b, \u3068\u3082, \u3069\u3082, 
-#       \u306a\u304c\u3089, \u306a\u308a, \u306e\u3067, \u306e\u306b, \u3070, \u3082\u306e\u306e, \u3084 ( \u3057\u305f), \u3084\u3044\u306a\u3084, (\u3053\u308d\u3093) \u3058\u3083(\u3044\u3051\u306a\u3044)-\u53e3\u8a9e/, 
-#       (\u884c\u3063) \u3061\u3083(\u3044\u3051\u306a\u3044)-\u53e3\u8a9e/, (\u8a00\u3063) \u305f\u3063\u3066 (\u3057\u304b\u305f\u304c\u306a\u3044)-\u53e3\u8a9e/, (\u305d\u308c\u304c\u306a\u304f)\u3063\u305f\u3063\u3066 (\u5e73\u6c17)-\u53e3\u8a9e/
-\u52a9\u8a5e-\u63a5\u7d9a\u52a9\u8a5e
-#
-#  particle-dependency:
-#  e.g. \u3053\u305d, \u3055\u3048, \u3057\u304b, \u3059\u3089, \u306f, \u3082, \u305e
-\u52a9\u8a5e-\u4fc2\u52a9\u8a5e
-#
-#  particle-adverbial:
-#  e.g. \u304c\u3066\u3089, \u304b\u3082, \u304f\u3089\u3044, \u4f4d, \u3050\u3089\u3044, \u3057\u3082, (\u5b66\u6821) \u3058\u3083(\u3053\u308c\u304c\u6d41\u884c\u3063\u3066\u3044\u308b)-\u53e3\u8a9e/, 
-#       (\u305d\u308c)\u3058\u3083\u3042 (\u3088\u304f\u306a\u3044)-\u53e3\u8a9e/, \u305a\u3064, (\u79c1) \u306a\u305e, \u306a\u3069, (\u79c1) \u306a\u308a (\u306b), (\u5148\u751f) \u306a\u3093\u304b (\u5927\u5acc\u3044)-\u53e3\u8a9e/,
-#       (\u79c1) \u306a\u3093\u305e, (\u5148\u751f) \u306a\u3093\u3066 (\u5927\u5acc\u3044)-\u53e3\u8a9e/, \u306e\u307f, \u3060\u3051, (\u79c1) \u3060\u3063\u3066-\u53e3\u8a9e/, \u3060\u306b, 
-#       (\u5f7c)\u3063\u305f\u3089-\u53e3\u8a9e/, (\u304a\u8336) \u3067\u3082 (\u3044\u304b\u304c), \u7b49 (\u3068\u3046), (\u4eca\u5f8c) \u3068\u3082, \u3070\u304b\u308a, \u3070\u3063\u304b-\u53e3\u8a9e/, \u3070\u3063\u304b\u308a-\u53e3\u8a9e/,
-#       \u307b\u3069, \u7a0b, \u307e\u3067, \u8fc4, (\u8ab0) \u3082 (\u304c)([\u52a9\u8a5e-\u683c\u52a9\u8a5e] \u304a\u3088\u3073 [\u52a9\u8a5e-\u4fc2\u52a9\u8a5e] \u306e\u524d\u306b\u4f4d\u7f6e\u3059\u308b\u300c\u3082\u300d)
-\u52a9\u8a5e-\u526f\u52a9\u8a5e
-#
-#  particle-interjective: particles with interjective grammatical roles.
-#  e.g. (\u677e\u5cf6) \u3084
-\u52a9\u8a5e-\u9593\u6295\u52a9\u8a5e
-#
-#  particle-coordinate:
-#  e.g. \u3068, \u305f\u308a, \u3060\u306e, \u3060\u308a, \u3068\u304b, \u306a\u308a, \u3084, \u3084\u3089
-\u52a9\u8a5e-\u4e26\u7acb\u52a9\u8a5e
-#
-#  particle-final:
-#  e.g. \u304b\u3044, \u304b\u3057\u3089, \u3055, \u305c, (\u3060)\u3063\u3051-\u53e3\u8a9e/, (\u3068\u307e\u3063\u3066\u308b) \u3067-\u65b9\u8a00/, \u306a, \u30ca, \u306a\u3042-\u53e3\u8a9e/, \u305e, \u306d, \u30cd, 
-#       \u306d\u3047-\u53e3\u8a9e/, \u306d\u3048-\u53e3\u8a9e/, \u306d\u3093-\u65b9\u8a00/, \u306e, \u306e\u3046-\u53e3\u8a9e/, \u3084, \u3088, \u30e8, \u3088\u3049-\u53e3\u8a9e/, \u308f, \u308f\u3044-\u53e3\u8a9e/
-\u52a9\u8a5e-\u7d42\u52a9\u8a5e
-#
-#  particle-adverbial/conjunctive/final: The particle "ka" when unknown whether it is 
-#  adverbial, conjunctive, or sentence final. For example:
-#       (a) \u300cA \u304b B \u304b\u300d. Ex:\u300c(\u56fd\u5185\u3067\u904b\u7528\u3059\u308b) \u304b,(\u6d77\u5916\u3067\u904b\u7528\u3059\u308b) \u304b (.)\u300d
-#       (b) Inside an adverb phrase. Ex:\u300c(\u5e78\u3044\u3068\u3044\u3046) \u304b (, \u6b7b\u8005\u306f\u3044\u306a\u304b\u3063\u305f.)\u300d
-#           \u300c(\u7948\u308a\u304c\u5c4a\u3044\u305f\u305b\u3044) \u304b (, \u8a66\u9a13\u306b\u5408\u683c\u3057\u305f.)\u300d
-#       (c) \u300c\u304b\u306e\u3088\u3046\u306b\u300d. Ex:\u300c(\u4f55\u3082\u306a\u304b\u3063\u305f) \u304b (\u306e\u3088\u3046\u306b\u632f\u308b\u821e\u3063\u305f.)\u300d
-#  e.g. \u304b
-\u52a9\u8a5e-\u526f\u52a9\u8a5e\uff0f\u4e26\u7acb\u52a9\u8a5e\uff0f\u7d42\u52a9\u8a5e
-#
-#  particle-adnominalizer: The "no" that attaches to nouns and modifies 
-#  non-inflectional words.
-\u52a9\u8a5e-\u9023\u4f53\u5316
-#
-#  particle-adnominalizer: The "ni" and "to" that appear following nouns and adverbs 
-#  that are giongo, giseigo, or gitaigo.
-#  e.g. \u306b, \u3068
-\u52a9\u8a5e-\u526f\u8a5e\u5316
-#
-#  particle-special: A particle that does not fit into one of the above classifications. 
-#  This includes particles that are used in Tanka, Haiku, and other poetry.
-#  e.g. \u304b\u306a, \u3051\u3080, ( \u3057\u305f\u3060\u308d\u3046) \u306b, (\u3042\u3093\u305f) \u306b\u3083(\u308f\u304b\u3089\u3093), (\u4ffa) \u3093 (\u5bb6)
-\u52a9\u8a5e-\u7279\u6b8a
-#
-#####
-#  auxiliary-verb:
-\u52a9\u52d5\u8a5e
-#
-#####
-#  interjection: Greetings and other exclamations.
-#  e.g. \u304a\u306f\u3088\u3046, \u304a\u306f\u3088\u3046\u3054\u3056\u3044\u307e\u3059, \u3053\u3093\u306b\u3061\u306f, \u3053\u3093\u3070\u3093\u306f, \u3042\u308a\u304c\u3068\u3046, \u3069\u3046\u3082\u3042\u308a\u304c\u3068\u3046, \u3042\u308a\u304c\u3068\u3046\u3054\u3056\u3044\u307e\u3059, 
-#       \u3044\u305f\u3060\u304d\u307e\u3059, \u3054\u3061\u305d\u3046\u3055\u307e, \u3055\u3088\u306a\u3089, \u3055\u3088\u3046\u306a\u3089, \u306f\u3044, \u3044\u3044\u3048, \u3054\u3081\u3093, \u3054\u3081\u3093\u306a\u3055\u3044
-#\u611f\u52d5\u8a5e
-#
-#####
-#  symbol: unclassified Symbols.
-\u8a18\u53f7
-#
-#  symbol-misc: A general symbol not in one of the categories below.
-#  e.g. [\u25cb\u25ce@$\u3012\u2192+]
-\u8a18\u53f7-\u4e00\u822c
-#
-#  symbol-comma: Commas
-#  e.g. [,\u3001]
-\u8a18\u53f7-\u8aad\u70b9
-#
-#  symbol-period: Periods and full stops.
-#  e.g. [.\uff0e\u3002]
-\u8a18\u53f7-\u53e5\u70b9
-#
-#  symbol-space: Full-width whitespace.
-\u8a18\u53f7-\u7a7a\u767d
-#
-#  symbol-open_bracket:
-#  e.g. [({\u2018\u201c\u300e\u3010]
-\u8a18\u53f7-\u62ec\u5f27\u958b
-#
-#  symbol-close_bracket:
-#  e.g. [)}\u2019\u201d\u300f\u300d\u3011]
-\u8a18\u53f7-\u62ec\u5f27\u9589
-#
-#  symbol-alphabetic:
-#\u8a18\u53f7-\u30a2\u30eb\u30d5\u30a1\u30d9\u30c3\u30c8
-#
-#####
-#  other: unclassified other
-#\u305d\u306e\u4ed6
-#
-#  other-interjection: Words that are hard to classify as noun-suffixes or 
-#  sentence-final particles.
-#  e.g. (\u3060)\u30a1
-\u305d\u306e\u4ed6-\u9593\u6295
-#
-#####
-#  filler: Aizuchi that occurs during a conversation or sounds inserted as filler.
-#  e.g. \u3042\u306e, \u3046\u3093\u3068, \u3048\u3068
-\u30d5\u30a3\u30e9\u30fc
-#
-#####
-#  non-verbal: non-verbal sound.
-\u975e\u8a00\u8a9e\u97f3
-#
-#####
-#  fragment:
-#\u8a9e\u65ad\u7247
-#
-#####
-#  unknown: unknown part of speech.
-#\u672a\u77e5\u8a9e
-#
-##### End of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stopwords_ar.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_ar.txt b/solr/example/example-DIH/solr/rss/conf/lang/stopwords_ar.txt
deleted file mode 100644
index 046829d..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_ar.txt
+++ /dev/null
@@ -1,125 +0,0 @@
-# This file was created by Jacques Savoy and is distributed under the BSD license.
-# See http://members.unine.ch/jacques.savoy/clef/index.html.
-# Also see http://www.opensource.org/licenses/bsd-license.html
-# Cleaned on October 11, 2009 (not normalized, so use before normalization)
-# This means that when modifying this list, you might need to add some 
-# redundant entries, for example containing forms with both \u0623 and \u0627
-\u0645\u0646
-\u0648\u0645\u0646
-\u0645\u0646\u0647\u0627
-\u0645\u0646\u0647
-\u0641\u064a
-\u0648\u0641\u064a
-\u0641\u064a\u0647\u0627
-\u0641\u064a\u0647
-\u0648
-\u0641
-\u062b\u0645
-\u0627\u0648
-\u0623\u0648
-\u0628
-\u0628\u0647\u0627
-\u0628\u0647
-\u0627
-\u0623
-\u0627\u0649
-\u0627\u064a
-\u0623\u064a
-\u0623\u0649
-\u0644\u0627
-\u0648\u0644\u0627
-\u0627\u0644\u0627
-\u0623\u0644\u0627
-\u0625\u0644\u0627
-\u0644\u0643\u0646
-\u0645\u0627
-\u0648\u0645\u0627
-\u0643\u0645\u0627
-\u0641\u0645\u0627
-\u0639\u0646
-\u0645\u0639
-\u0627\u0630\u0627
-\u0625\u0630\u0627
-\u0627\u0646
-\u0623\u0646
-\u0625\u0646
-\u0627\u0646\u0647\u0627
-\u0623\u0646\u0647\u0627
-\u0625\u0646\u0647\u0627
-\u0627\u0646\u0647
-\u0623\u0646\u0647
-\u0625\u0646\u0647
-\u0628\u0627\u0646
-\u0628\u0623\u0646
-\u0641\u0627\u0646
-\u0641\u0623\u0646
-\u0648\u0627\u0646
-\u0648\u0623\u0646
-\u0648\u0625\u0646
-\u0627\u0644\u062a\u0649
-\u0627\u0644\u062a\u064a
-\u0627\u0644\u0630\u0649
-\u0627\u0644\u0630\u064a
-\u0627\u0644\u0630\u064a\u0646
-\u0627\u0644\u0649
-\u0627\u0644\u064a
-\u0625\u0644\u0649
-\u0625\u0644\u064a
-\u0639\u0644\u0649
-\u0639\u0644\u064a\u0647\u0627
-\u0639\u0644\u064a\u0647
-\u0627\u0645\u0627
-\u0623\u0645\u0627
-\u0625\u0645\u0627
-\u0627\u064a\u0636\u0627
-\u0623\u064a\u0636\u0627
-\u0643\u0644
-\u0648\u0643\u0644
-\u0644\u0645
-\u0648\u0644\u0645
-\u0644\u0646
-\u0648\u0644\u0646
-\u0647\u0649
-\u0647\u064a
-\u0647\u0648
-\u0648\u0647\u0649
-\u0648\u0647\u064a
-\u0648\u0647\u0648
-\u0641\u0647\u0649
-\u0641\u0647\u064a
-\u0641\u0647\u0648
-\u0627\u0646\u062a
-\u0623\u0646\u062a
-\u0644\u0643
-\u0644\u0647\u0627
-\u0644\u0647
-\u0647\u0630\u0647
-\u0647\u0630\u0627
-\u062a\u0644\u0643
-\u0630\u0644\u0643
-\u0647\u0646\u0627\u0643
-\u0643\u0627\u0646\u062a
-\u0643\u0627\u0646
-\u064a\u0643\u0648\u0646
-\u062a\u0643\u0648\u0646
-\u0648\u0643\u0627\u0646\u062a
-\u0648\u0643\u0627\u0646
-\u063a\u064a\u0631
-\u0628\u0639\u0636
-\u0642\u062f
-\u0646\u062d\u0648
-\u0628\u064a\u0646
-\u0628\u064a\u0646\u0645\u0627
-\u0645\u0646\u0630
-\u0636\u0645\u0646
-\u062d\u064a\u062b
-\u0627\u0644\u0627\u0646
-\u0627\u0644\u0622\u0646
-\u062e\u0644\u0627\u0644
-\u0628\u0639\u062f
-\u0642\u0628\u0644
-\u062d\u062a\u0649
-\u0639\u0646\u062f
-\u0639\u0646\u062f\u0645\u0627
-\u0644\u062f\u0649
-\u062c\u0645\u064a\u0639

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stopwords_bg.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_bg.txt b/solr/example/example-DIH/solr/rss/conf/lang/stopwords_bg.txt
deleted file mode 100644
index 1ae4ba2..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_bg.txt
+++ /dev/null
@@ -1,193 +0,0 @@
-# This file was created by Jacques Savoy and is distributed under the BSD license.
-# See http://members.unine.ch/jacques.savoy/clef/index.html.
-# Also see http://www.opensource.org/licenses/bsd-license.html
-\u0430
-\u0430\u0437
-\u0430\u043a\u043e
-\u0430\u043b\u0430
-\u0431\u0435
-\u0431\u0435\u0437
-\u0431\u0435\u0448\u0435
-\u0431\u0438
-\u0431\u0438\u043b
-\u0431\u0438\u043b\u0430
-\u0431\u0438\u043b\u0438
-\u0431\u0438\u043b\u043e
-\u0431\u043b\u0438\u0437\u043e
-\u0431\u044a\u0434\u0430\u0442
-\u0431\u044a\u0434\u0435
-\u0431\u044f\u0445\u0430
-\u0432
-\u0432\u0430\u0441
-\u0432\u0430\u0448
-\u0432\u0430\u0448\u0430
-\u0432\u0435\u0440\u043e\u044f\u0442\u043d\u043e
-\u0432\u0435\u0447\u0435
-\u0432\u0437\u0435\u043c\u0430
-\u0432\u0438
-\u0432\u0438\u0435
-\u0432\u0438\u043d\u0430\u0433\u0438
-\u0432\u0441\u0435
-\u0432\u0441\u0435\u043a\u0438
-\u0432\u0441\u0438\u0447\u043a\u0438
-\u0432\u0441\u0438\u0447\u043a\u043e
-\u0432\u0441\u044f\u043a\u0430
-\u0432\u044a\u0432
-\u0432\u044a\u043f\u0440\u0435\u043a\u0438
-\u0432\u044a\u0440\u0445\u0443
-\u0433
-\u0433\u0438
-\u0433\u043b\u0430\u0432\u043d\u043e
-\u0433\u043e
-\u0434
-\u0434\u0430
-\u0434\u0430\u043b\u0438
-\u0434\u043e
-\u0434\u043e\u043a\u0430\u0442\u043e
-\u0434\u043e\u043a\u043e\u0433\u0430
-\u0434\u043e\u0440\u0438
-\u0434\u043e\u0441\u0435\u0433\u0430
-\u0434\u043e\u0441\u0442\u0430
-\u0435
-\u0435\u0434\u0432\u0430
-\u0435\u0434\u0438\u043d
-\u0435\u0442\u043e
-\u0437\u0430
-\u0437\u0430\u0434
-\u0437\u0430\u0435\u0434\u043d\u043e
-\u0437\u0430\u0440\u0430\u0434\u0438
-\u0437\u0430\u0441\u0435\u0433\u0430
-\u0437\u0430\u0442\u043e\u0432\u0430
-\u0437\u0430\u0449\u043e
-\u0437\u0430\u0449\u043e\u0442\u043e
-\u0438
-\u0438\u0437
-\u0438\u043b\u0438
-\u0438\u043c
-\u0438\u043c\u0430
-\u0438\u043c\u0430\u0442
-\u0438\u0441\u043a\u0430
-\u0439
-\u043a\u0430\u0437\u0430
-\u043a\u0430\u043a
-\u043a\u0430\u043a\u0432\u0430
-\u043a\u0430\u043a\u0432\u043e
-\u043a\u0430\u043a\u0442\u043e
-\u043a\u0430\u043a\u044a\u0432
-\u043a\u0430\u0442\u043e
-\u043a\u043e\u0433\u0430
-\u043a\u043e\u0433\u0430\u0442\u043e
-\u043a\u043e\u0435\u0442\u043e
-\u043a\u043e\u0438\u0442\u043e
-\u043a\u043e\u0439
-\u043a\u043e\u0439\u0442\u043e
-\u043a\u043e\u043b\u043a\u043e
-\u043a\u043e\u044f\u0442\u043e
-\u043a\u044a\u0434\u0435
-\u043a\u044a\u0434\u0435\u0442\u043e
-\u043a\u044a\u043c
-\u043b\u0438
-\u043c
-\u043c\u0435
-\u043c\u0435\u0436\u0434\u0443
-\u043c\u0435\u043d
-\u043c\u0438
-\u043c\u043d\u043e\u0437\u0438\u043d\u0430
-\u043c\u043e\u0433\u0430
-\u043c\u043e\u0433\u0430\u0442
-\u043c\u043e\u0436\u0435
-\u043c\u043e\u043b\u044f
-\u043c\u043e\u043c\u0435\u043d\u0442\u0430
-\u043c\u0443
-\u043d
-\u043d\u0430
-\u043d\u0430\u0434
-\u043d\u0430\u0437\u0430\u0434
-\u043d\u0430\u0439
-\u043d\u0430\u043f\u0440\u0430\u0432\u0438
-\u043d\u0430\u043f\u0440\u0435\u0434
-\u043d\u0430\u043f\u0440\u0438\u043c\u0435\u0440
-\u043d\u0430\u0441
-\u043d\u0435
-\u043d\u0435\u0433\u043e
-\u043d\u0435\u044f
-\u043d\u0438
-\u043d\u0438\u0435
-\u043d\u0438\u043a\u043e\u0439
-\u043d\u0438\u0442\u043e
-\u043d\u043e
-\u043d\u044f\u043a\u043e\u0438
-\u043d\u044f\u043a\u043e\u0439
-\u043d\u044f\u043c\u0430
-\u043e\u0431\u0430\u0447\u0435
-\u043e\u043a\u043e\u043b\u043e
-\u043e\u0441\u0432\u0435\u043d
-\u043e\u0441\u043e\u0431\u0435\u043d\u043e
-\u043e\u0442
-\u043e\u0442\u0433\u043e\u0440\u0435
-\u043e\u0442\u043d\u043e\u0432\u043e
-\u043e\u0449\u0435
-\u043f\u0430\u043a
-\u043f\u043e
-\u043f\u043e\u0432\u0435\u0447\u0435
-\u043f\u043e\u0432\u0435\u0447\u0435\u0442\u043e
-\u043f\u043e\u0434
-\u043f\u043e\u043d\u0435
-\u043f\u043e\u0440\u0430\u0434\u0438
-\u043f\u043e\u0441\u043b\u0435
-\u043f\u043e\u0447\u0442\u0438
-\u043f\u0440\u0430\u0432\u0438
-\u043f\u0440\u0435\u0434
-\u043f\u0440\u0435\u0434\u0438
-\u043f\u0440\u0435\u0437
-\u043f\u0440\u0438
-\u043f\u044a\u043a
-\u043f\u044a\u0440\u0432\u043e
-\u0441
-\u0441\u0430
-\u0441\u0430\u043c\u043e
-\u0441\u0435
-\u0441\u0435\u0433\u0430
-\u0441\u0438
-\u0441\u043a\u043e\u0440\u043e
-\u0441\u043b\u0435\u0434
-\u0441\u043c\u0435
-\u0441\u043f\u043e\u0440\u0435\u0434
-\u0441\u0440\u0435\u0434
-\u0441\u0440\u0435\u0449\u0443
-\u0441\u0442\u0435
-\u0441\u044a\u043c
-\u0441\u044a\u0441
-\u0441\u044a\u0449\u043e
-\u0442
-\u0442\u0430\u0437\u0438
-\u0442\u0430\u043a\u0430
-\u0442\u0430\u043a\u0438\u0432\u0430
-\u0442\u0430\u043a\u044a\u0432
-\u0442\u0430\u043c
-\u0442\u0432\u043e\u0439
-\u0442\u0435
-\u0442\u0435\u0437\u0438
-\u0442\u0438
-\u0442\u043d
-\u0442\u043e
-\u0442\u043e\u0432\u0430
-\u0442\u043e\u0433\u0430\u0432\u0430
-\u0442\u043e\u0437\u0438
-\u0442\u043e\u0439
-\u0442\u043e\u043b\u043a\u043e\u0432\u0430
-\u0442\u043e\u0447\u043d\u043e
-\u0442\u0440\u044f\u0431\u0432\u0430
-\u0442\u0443\u043a
-\u0442\u044a\u0439
-\u0442\u044f
-\u0442\u044f\u0445
-\u0443
-\u0445\u0430\u0440\u0435\u0441\u0432\u0430
-\u0447
-\u0447\u0435
-\u0447\u0435\u0441\u0442\u043e
-\u0447\u0440\u0435\u0437
-\u0449\u0435
-\u0449\u043e\u043c
-\u044f

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stopwords_ca.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_ca.txt b/solr/example/example-DIH/solr/rss/conf/lang/stopwords_ca.txt
deleted file mode 100644
index 3da65de..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_ca.txt
+++ /dev/null
@@ -1,220 +0,0 @@
-# Catalan stopwords from http://github.com/vcl/cue.language (Apache 2 Licensed)
-a
-abans
-ací
-ah
-així
-això
-al
-als
-aleshores
-algun
-alguna
-algunes
-alguns
-alhora
-allà
-allí
-allò
-altra
-altre
-altres
-amb
-ambdós
-ambdues
-apa
-aquell
-aquella
-aquelles
-aquells
-aquest
-aquesta
-aquestes
-aquests
-aquí
-baix
-cada
-cadascú
-cadascuna
-cadascunes
-cadascuns
-com
-contra
-d'un
-d'una
-d'unes
-d'uns
-dalt
-de
-del
-dels
-des
-després
-dins
-dintre
-donat
-doncs
-durant
-e
-eh
-el
-els
-em
-en
-encara
-ens
-entre
-érem
-eren
-éreu
-es
-és
-esta
-està
-estàvem
-estaven
-estàveu
-esteu
-et
-etc
-ets
-fins
-fora
-gairebé
-ha
-han
-has
-havia
-he
-hem
-heu
-hi 
-ho
-i
-igual
-iguals
-ja
-l'hi
-la
-les
-li
-li'n
-llavors
-m'he
-ma
-mal
-malgrat
-mateix
-mateixa
-mateixes
-mateixos
-me
-mentre
-més
-meu
-meus
-meva
-meves
-molt
-molta
-moltes
-molts
-mon
-mons
-n'he
-n'hi
-ne
-ni
-no
-nogensmenys
-només
-nosaltres
-nostra
-nostre
-nostres
-o
-oh
-oi
-on
-pas
-pel
-pels
-per
-però
-perquè
-poc 
-poca
-pocs
-poques
-potser
-propi
-qual
-quals
-quan
-quant 
-que
-què
-quelcom
-qui
-quin
-quina
-quines
-quins
-s'ha
-s'han
-sa
-semblant
-semblants
-ses
-seu 
-seus
-seva
-seva
-seves
-si
-sobre
-sobretot
-sóc
-solament
-sols
-son 
-són
-sons 
-sota
-sou
-t'ha
-t'han
-t'he
-ta
-tal
-també
-tampoc
-tan
-tant
-tanta
-tantes
-teu
-teus
-teva
-teves
-ton
-tons
-tot
-tota
-totes
-tots
-un
-una
-unes
-uns
-us
-va
-vaig
-vam
-van
-vas
-veu
-vosaltres
-vostra
-vostre
-vostres

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stopwords_ckb.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_ckb.txt b/solr/example/example-DIH/solr/rss/conf/lang/stopwords_ckb.txt
deleted file mode 100644
index 87abf11..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_ckb.txt
+++ /dev/null
@@ -1,136 +0,0 @@
-# set of kurdish stopwords
-# note these have been normalized with our scheme (e represented with U+06D5, etc)
-# constructed from:
-# * Fig 5 of "Building A Test Collection For Sorani Kurdish" (Esmaili et al)
-# * "Sorani Kurdish: A Reference Grammar with selected readings" (Thackston)
-# * Corpus-based analysis of 77M word Sorani collection: wikipedia, news, blogs, etc
-
-# and
-\u0648
-# which
-\u06a9\u06d5
-# of
-\u06cc
-# made/did
-\u06a9\u0631\u062f
-# that/which
-\u0626\u06d5\u0648\u06d5\u06cc
-# on/head
-\u0633\u06d5\u0631
-# two
-\u062f\u0648\u0648
-# also
-\u0647\u06d5\u0631\u0648\u06d5\u0647\u0627
-# from/that
-\u0644\u06d5\u0648
-# makes/does
-\u062f\u06d5\u06a9\u0627\u062a
-# some
-\u0686\u06d5\u0646\u062f
-# every
-\u0647\u06d5\u0631
-
-# demonstratives
-# that
-\u0626\u06d5\u0648
-# this
-\u0626\u06d5\u0645
-
-# personal pronouns
-# I
-\u0645\u0646
-# we
-\u0626\u06ce\u0645\u06d5
-# you
-\u062a\u06c6
-# you
-\u0626\u06ce\u0648\u06d5
-# he/she/it
-\u0626\u06d5\u0648
-# they
-\u0626\u06d5\u0648\u0627\u0646
-
-# prepositions
-# to/with/by
-\u0628\u06d5
-\u067e\u06ce
-# without
-\u0628\u06d5\u0628\u06ce
-# along with/while/during
-\u0628\u06d5\u062f\u06d5\u0645
-# in the opinion of
-\u0628\u06d5\u0644\u0627\u06cc
-# according to
-\u0628\u06d5\u067e\u06ce\u06cc
-# before
-\u0628\u06d5\u0631\u0644\u06d5
-# in the direction of
-\u0628\u06d5\u0631\u06d5\u0648\u06cc
-# in front of/toward
-\u0628\u06d5\u0631\u06d5\u0648\u06d5
-# before/in the face of
-\u0628\u06d5\u0631\u062f\u06d5\u0645
-# without
-\u0628\u06ce
-# except for
-\u0628\u06ce\u062c\u06af\u06d5
-# for
-\u0628\u06c6
-# on/in
-\u062f\u06d5
-\u062a\u06ce
-# with
-\u062f\u06d5\u06af\u06d5\u06b5
-# after
-\u062f\u0648\u0627\u06cc
-# except for/aside from
-\u062c\u06af\u06d5
-# in/from
-\u0644\u06d5
-\u0644\u06ce
-# in front of/before/because of
-\u0644\u06d5\u0628\u06d5\u0631
-# between/among
-\u0644\u06d5\u0628\u06d5\u06cc\u0646\u06cc
-# concerning/about
-\u0644\u06d5\u0628\u0627\u0628\u06d5\u062a
-# concerning
-\u0644\u06d5\u0628\u0627\u0631\u06d5\u06cc
-# instead of
-\u0644\u06d5\u0628\u0627\u062a\u06cc
-# beside
-\u0644\u06d5\u0628\u0646
-# instead of
-\u0644\u06d5\u0628\u0631\u06ce\u062a\u06cc
-# behind
-\u0644\u06d5\u062f\u06d5\u0645
-# with/together with
-\u0644\u06d5\u06af\u06d5\u06b5
-# by
-\u0644\u06d5\u0644\u0627\u06cc\u06d5\u0646
-# within
-\u0644\u06d5\u0646\u0627\u0648
-# between/among
-\u0644\u06d5\u0646\u06ce\u0648
-# for the sake of
-\u0644\u06d5\u067e\u06ce\u0646\u0627\u0648\u06cc
-# with respect to
-\u0644\u06d5\u0631\u06d5\u0648\u06cc
-# by means of/for
-\u0644\u06d5\u0631\u06ce
-# for the sake of
-\u0644\u06d5\u0631\u06ce\u06af\u0627
-# on/on top of/according to
-\u0644\u06d5\u0633\u06d5\u0631
-# under
-\u0644\u06d5\u0698\u06ce\u0631
-# between/among
-\u0646\u0627\u0648
-# between/among
-\u0646\u06ce\u0648\u0627\u0646
-# after
-\u067e\u0627\u0634
-# before
-\u067e\u06ce\u0634
-# like
-\u0648\u06d5\u06a9

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stopwords_cz.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_cz.txt b/solr/example/example-DIH/solr/rss/conf/lang/stopwords_cz.txt
deleted file mode 100644
index 53c6097..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_cz.txt
+++ /dev/null
@@ -1,172 +0,0 @@
-a
-s
-k
-o
-i
-u
-v
-z
-dnes
-cz
-tímto
-bude\u0161
-budem
-byli
-jse\u0161
-m\u016fj
-svým
-ta
-tomto
-tohle
-tuto
-tyto
-jej
-zda
-pro\u010d
-máte
-tato
-kam
-tohoto
-kdo
-kte\u0159í
-mi
-nám
-tom
-tomuto
-mít
-nic
-proto
-kterou
-byla
-toho
-proto\u017ee
-asi
-ho
-na\u0161i
-napi\u0161te
-re
-co\u017e
-tím
-tak\u017ee
-svých
-její
-svými
-jste
-aj
-tu
-tedy
-teto
-bylo
-kde
-ke
-pravé
-ji
-nad
-nejsou
-\u010di
-pod
-téma
-mezi
-p\u0159es
-ty
-pak
-vám
-ani
-kdy\u017e
-v\u0161ak
-neg
-jsem
-tento
-\u010dlánku
-\u010dlánky
-aby
-jsme
-p\u0159ed
-pta
-jejich
-byl
-je\u0161t\u011b
-a\u017e
-bez
-také
-pouze
-první
-va\u0161e
-která
-nás
-nový
-tipy
-pokud
-m\u016f\u017ee
-strana
-jeho
-své
-jiné
-zprávy
-nové
-není
-vás
-jen
-podle
-zde
-u\u017e
-být
-více
-bude
-ji\u017e
-ne\u017e
-který
-by
-které
-co
-nebo
-ten
-tak
-má
-p\u0159i
-od
-po
-jsou
-jak
-dal\u0161í
-ale
-si
-se
-ve
-to
-jako
-za
-zp\u011bt
-ze
-do
-pro
-je
-na
-atd
-atp
-jakmile
-p\u0159i\u010dem\u017e
-já
-on
-ona
-ono
-oni
-ony
-my
-vy
-jí
-ji
-m\u011b
-mne
-jemu
-tomu
-t\u011bm
-t\u011bmu
-n\u011bmu
-n\u011bmu\u017e
-jeho\u017e
-jí\u017e
-jeliko\u017e
-je\u017e
-jako\u017e
-na\u010de\u017e

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stopwords_da.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_da.txt b/solr/example/example-DIH/solr/rss/conf/lang/stopwords_da.txt
deleted file mode 100644
index 42e6145..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_da.txt
+++ /dev/null
@@ -1,110 +0,0 @@
- | From svn.tartarus.org/snowball/trunk/website/algorithms/danish/stop.txt
- | This file is distributed under the BSD License.
- | See http://snowball.tartarus.org/license.php
- | Also see http://www.opensource.org/licenses/bsd-license.html
- |  - Encoding was converted to UTF-8.
- |  - This notice was added.
- |
- | NOTE: To use this file with StopFilterFactory, you must specify format="snowball"
-
- | A Danish stop word list. Comments begin with vertical bar. Each stop
- | word is at the start of a line.
-
- | This is a ranked list (commonest to rarest) of stopwords derived from
- | a large text sample.
-
-
-og           | and
-i            | in
-jeg          | I
-det          | that (dem. pronoun)/it (pers. pronoun)
-at           | that (in front of a sentence)/to (with infinitive)
-en           | a/an
-den          | it (pers. pronoun)/that (dem. pronoun)
-til          | to/at/for/until/against/by/of/into, more
-er           | present tense of "to be"
-som          | who, as
-på           | on/upon/in/on/at/to/after/of/with/for, on
-de           | they
-med          | with/by/in, along
-han          | he
-af           | of/by/from/off/for/in/with/on, off
-for          | at/for/to/from/by/of/ago, in front/before, because
-ikke         | not
-der          | who/which, there/those
-var          | past tense of "to be"
-mig          | me/myself
-sig          | oneself/himself/herself/itself/themselves
-men          | but
-et           | a/an/one, one (number), someone/somebody/one
-har          | present tense of "to have"
-om           | round/about/for/in/a, about/around/down, if
-vi           | we
-min          | my
-havde        | past tense of "to have"
-ham          | him
-hun          | she
-nu           | now
-over         | over/above/across/by/beyond/past/on/about, over/past
-da           | then, when/as/since
-fra          | from/off/since, off, since
-du           | you
-ud           | out
-sin          | his/her/its/one's
-dem          | them
-os           | us/ourselves
-op           | up
-man          | you/one
-hans         | his
-hvor         | where
-eller        | or
-hvad         | what
-skal         | must/shall etc.
-selv         | myself/youself/herself/ourselves etc., even
-her          | here
-alle         | all/everyone/everybody etc.
-vil          | will (verb)
-blev         | past tense of "to stay/to remain/to get/to become"
-kunne        | could
-ind          | in
-når          | when
-være         | present tense of "to be"
-dog          | however/yet/after all
-noget        | something
-ville        | would
-jo           | you know/you see (adv), yes
-deres        | their/theirs
-efter        | after/behind/according to/for/by/from, later/afterwards
-ned          | down
-skulle       | should
-denne        | this
-end          | than
-dette        | this
-mit          | my/mine
-også         | also
-under        | under/beneath/below/during, below/underneath
-have         | have
-dig          | you
-anden        | other
-hende        | her
-mine         | my
-alt          | everything
-meget        | much/very, plenty of
-sit          | his, her, its, one's
-sine         | his, her, its, one's
-vor          | our
-mod          | against
-disse        | these
-hvis         | if
-din          | your/yours
-nogle        | some
-hos          | by/at
-blive        | be/become
-mange        | many
-ad           | by/through
-bliver       | present tense of "to be/to become"
-hendes       | her/hers
-været        | be
-thi          | for (conj)
-jer          | you
-sådan        | such, like this/like that

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stopwords_de.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_de.txt b/solr/example/example-DIH/solr/rss/conf/lang/stopwords_de.txt
deleted file mode 100644
index 86525e7..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_de.txt
+++ /dev/null
@@ -1,294 +0,0 @@
- | From svn.tartarus.org/snowball/trunk/website/algorithms/german/stop.txt
- | This file is distributed under the BSD License.
- | See http://snowball.tartarus.org/license.php
- | Also see http://www.opensource.org/licenses/bsd-license.html
- |  - Encoding was converted to UTF-8.
- |  - This notice was added.
- |
- | NOTE: To use this file with StopFilterFactory, you must specify format="snowball"
-
- | A German stop word list. Comments begin with vertical bar. Each stop
- | word is at the start of a line.
-
- | The number of forms in this list is reduced significantly by passing it
- | through the German stemmer.
-
-
-aber           |  but
-
-alle           |  all
-allem
-allen
-aller
-alles
-
-als            |  than, as
-also           |  so
-am             |  an + dem
-an             |  at
-
-ander          |  other
-andere
-anderem
-anderen
-anderer
-anderes
-anderm
-andern
-anderr
-anders
-
-auch           |  also
-auf            |  on
-aus            |  out of
-bei            |  by
-bin            |  am
-bis            |  until
-bist           |  art
-da             |  there
-damit          |  with it
-dann           |  then
-
-der            |  the
-den
-des
-dem
-die
-das
-
-daß            |  that
-
-derselbe       |  the same
-derselben
-denselben
-desselben
-demselben
-dieselbe
-dieselben
-dasselbe
-
-dazu           |  to that
-
-dein           |  thy
-deine
-deinem
-deinen
-deiner
-deines
-
-denn           |  because
-
-derer          |  of those
-dessen         |  of him
-
-dich           |  thee
-dir            |  to thee
-du             |  thou
-
-dies           |  this
-diese
-diesem
-diesen
-dieser
-dieses
-
-
-doch           |  (several meanings)
-dort           |  (over) there
-
-
-durch          |  through
-
-ein            |  a
-eine
-einem
-einen
-einer
-eines
-
-einig          |  some
-einige
-einigem
-einigen
-einiger
-einiges
-
-einmal         |  once
-
-er             |  he
-ihn            |  him
-ihm            |  to him
-
-es             |  it
-etwas          |  something
-
-euer           |  your
-eure
-eurem
-euren
-eurer
-eures
-
-für            |  for
-gegen          |  towards
-gewesen        |  p.p. of sein
-hab            |  have
-habe           |  have
-haben          |  have
-hat            |  has
-hatte          |  had
-hatten         |  had
-hier           |  here
-hin            |  there
-hinter         |  behind
-
-ich            |  I
-mich           |  me
-mir            |  to me
-
-
-ihr            |  you, to her
-ihre
-ihrem
-ihren
-ihrer
-ihres
-euch           |  to you
-
-im             |  in + dem
-in             |  in
-indem          |  while
-ins            |  in + das
-ist            |  is
-
-jede           |  each, every
-jedem
-jeden
-jeder
-jedes
-
-jene           |  that
-jenem
-jenen
-jener
-jenes
-
-jetzt          |  now
-kann           |  can
-
-kein           |  no
-keine
-keinem
-keinen
-keiner
-keines
-
-können         |  can
-könnte         |  could
-machen         |  do
-man            |  one
-
-manche         |  some, many a
-manchem
-manchen
-mancher
-manches
-
-mein           |  my
-meine
-meinem
-meinen
-meiner
-meines
-
-mit            |  with
-muss           |  must
-musste         |  had to
-nach           |  to(wards)
-nicht          |  not
-nichts         |  nothing
-noch           |  still, yet
-nun            |  now
-nur            |  only
-ob             |  whether
-oder           |  or
-ohne           |  without
-sehr           |  very
-
-sein           |  his
-seine
-seinem
-seinen
-seiner
-seines
-
-selbst         |  self
-sich           |  herself
-
-sie            |  they, she
-ihnen          |  to them
-
-sind           |  are
-so             |  so
-
-solche         |  such
-solchem
-solchen
-solcher
-solches
-
-soll           |  shall
-sollte         |  should
-sondern        |  but
-sonst          |  else
-über           |  over
-um             |  about, around
-und            |  and
-
-uns            |  us
-unse
-unsem
-unsen
-unser
-unses
-
-unter          |  under
-viel           |  much
-vom            |  von + dem
-von            |  from
-vor            |  before
-während        |  while
-war            |  was
-waren          |  were
-warst          |  wast
-was            |  what
-weg            |  away, off
-weil           |  because
-weiter         |  further
-
-welche         |  which
-welchem
-welchen
-welcher
-welches
-
-wenn           |  when
-werde          |  will
-werden         |  will
-wie            |  how
-wieder         |  again
-will           |  want
-wir            |  we
-wird           |  will
-wirst          |  willst
-wo             |  where
-wollen         |  want
-wollte         |  wanted
-würde          |  would
-würden         |  would
-zu             |  to
-zum            |  zu + dem
-zur            |  zu + der
-zwar           |  indeed
-zwischen       |  between
-

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stopwords_el.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_el.txt b/solr/example/example-DIH/solr/rss/conf/lang/stopwords_el.txt
deleted file mode 100644
index 232681f..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_el.txt
+++ /dev/null
@@ -1,78 +0,0 @@
-# Lucene Greek Stopwords list
-# Note: by default this file is used after GreekLowerCaseFilter,
-# so when modifying this file use '\u03c3' instead of '\u03c2' 
-\u03bf
-\u03b7
-\u03c4\u03bf
-\u03bf\u03b9
-\u03c4\u03b1
-\u03c4\u03bf\u03c5
-\u03c4\u03b7\u03c3
-\u03c4\u03c9\u03bd
-\u03c4\u03bf\u03bd
-\u03c4\u03b7\u03bd
-\u03ba\u03b1\u03b9 
-\u03ba\u03b9
-\u03ba
-\u03b5\u03b9\u03bc\u03b1\u03b9
-\u03b5\u03b9\u03c3\u03b1\u03b9
-\u03b5\u03b9\u03bd\u03b1\u03b9
-\u03b5\u03b9\u03bc\u03b1\u03c3\u03c4\u03b5
-\u03b5\u03b9\u03c3\u03c4\u03b5
-\u03c3\u03c4\u03bf
-\u03c3\u03c4\u03bf\u03bd
-\u03c3\u03c4\u03b7
-\u03c3\u03c4\u03b7\u03bd
-\u03bc\u03b1
-\u03b1\u03bb\u03bb\u03b1
-\u03b1\u03c0\u03bf
-\u03b3\u03b9\u03b1
-\u03c0\u03c1\u03bf\u03c3
-\u03bc\u03b5
-\u03c3\u03b5
-\u03c9\u03c3
-\u03c0\u03b1\u03c1\u03b1
-\u03b1\u03bd\u03c4\u03b9
-\u03ba\u03b1\u03c4\u03b1
-\u03bc\u03b5\u03c4\u03b1
-\u03b8\u03b1
-\u03bd\u03b1
-\u03b4\u03b5
-\u03b4\u03b5\u03bd
-\u03bc\u03b7
-\u03bc\u03b7\u03bd
-\u03b5\u03c0\u03b9
-\u03b5\u03bd\u03c9
-\u03b5\u03b1\u03bd
-\u03b1\u03bd
-\u03c4\u03bf\u03c4\u03b5
-\u03c0\u03bf\u03c5
-\u03c0\u03c9\u03c3
-\u03c0\u03bf\u03b9\u03bf\u03c3
-\u03c0\u03bf\u03b9\u03b1
-\u03c0\u03bf\u03b9\u03bf
-\u03c0\u03bf\u03b9\u03bf\u03b9
-\u03c0\u03bf\u03b9\u03b5\u03c3
-\u03c0\u03bf\u03b9\u03c9\u03bd
-\u03c0\u03bf\u03b9\u03bf\u03c5\u03c3
-\u03b1\u03c5\u03c4\u03bf\u03c3
-\u03b1\u03c5\u03c4\u03b7
-\u03b1\u03c5\u03c4\u03bf
-\u03b1\u03c5\u03c4\u03bf\u03b9
-\u03b1\u03c5\u03c4\u03c9\u03bd
-\u03b1\u03c5\u03c4\u03bf\u03c5\u03c3
-\u03b1\u03c5\u03c4\u03b5\u03c3
-\u03b1\u03c5\u03c4\u03b1
-\u03b5\u03ba\u03b5\u03b9\u03bd\u03bf\u03c3
-\u03b5\u03ba\u03b5\u03b9\u03bd\u03b7
-\u03b5\u03ba\u03b5\u03b9\u03bd\u03bf
-\u03b5\u03ba\u03b5\u03b9\u03bd\u03bf\u03b9
-\u03b5\u03ba\u03b5\u03b9\u03bd\u03b5\u03c3
-\u03b5\u03ba\u03b5\u03b9\u03bd\u03b1
-\u03b5\u03ba\u03b5\u03b9\u03bd\u03c9\u03bd
-\u03b5\u03ba\u03b5\u03b9\u03bd\u03bf\u03c5\u03c3
-\u03bf\u03c0\u03c9\u03c3
-\u03bf\u03bc\u03c9\u03c3
-\u03b9\u03c3\u03c9\u03c3
-\u03bf\u03c3\u03bf
-\u03bf\u03c4\u03b9

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/lang/stopwords_en.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_en.txt b/solr/example/example-DIH/solr/rss/conf/lang/stopwords_en.txt
deleted file mode 100644
index 2c164c0..0000000
--- a/solr/example/example-DIH/solr/rss/conf/lang/stopwords_en.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# a couple of test stopwords to test that the words are really being
-# configured from this file:
-stopworda
-stopwordb
-
-# Standard english stop words taken from Lucene's StopAnalyzer
-a
-an
-and
-are
-as
-at
-be
-but
-by
-for
-if
-in
-into
-is
-it
-no
-not
-of
-on
-or
-such
-that
-the
-their
-then
-there
-these
-they
-this
-to
-was
-will
-with


[15/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-10357: Enable edismax and standard query parsers to handle the option combination sow=false / autoGeneratePhraseQueries=true by setting QueryBuilder.autoGenerateMultiTermSynonymsQuery

Posted by ab...@apache.org.
SOLR-10357: Enable edismax and standard query parsers to handle the option combination sow=false / autoGeneratePhraseQueries=true by setting QueryBuilder.autoGenerateMultiTermSynonymsQuery


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/0a689f4d
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/0a689f4d
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/0a689f4d

Branch: refs/heads/jira/solr-9959
Commit: 0a689f4d95e981e99ae0e80741e7cf1fa74ff60f
Parents: 6b0217b
Author: Steve Rowe <sa...@apache.org>
Authored: Tue Mar 28 18:39:28 2017 -0400
Committer: Steve Rowe <sa...@apache.org>
Committed: Tue Mar 28 18:39:28 2017 -0400

----------------------------------------------------------------------
 solr/CHANGES.txt                                |   3 +
 .../org/apache/solr/parser/QueryParser.java     |  11 +-
 .../java/org/apache/solr/parser/QueryParser.jj  |  11 +-
 .../solr/search/ExtendedDismaxQParser.java      |   8 --
 .../QueryParserConfigurationException.java      |  24 ----
 .../solr/collection1/conf/synonyms.txt          |   1 +
 .../solr/search/TestExtendedDismaxParser.java   | 141 +++++++++++++------
 .../apache/solr/search/TestSolrQueryParser.java |  48 +++++--
 8 files changed, 148 insertions(+), 99 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0a689f4d/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 49300fe..9d14e59 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -164,6 +164,9 @@ Other Changes
 * SOLR-10365: Handle a SolrCoreInitializationException while publishing core state during SolrCore creation
   (Ishan Chattopadhyaya)
 
+* SOLR-10357: Enable edismax and standard query parsers to handle the option combination 
+  sow=false / autoGeneratePhraseQueries="true" by setting QueryBuilder.autoGenerateMultiTermSynonymsQuery.
+
 ==================  6.5.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0a689f4d/solr/core/src/java/org/apache/solr/parser/QueryParser.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/parser/QueryParser.java b/solr/core/src/java/org/apache/solr/parser/QueryParser.java
index d9a64f4..e846c6e 100644
--- a/solr/core/src/java/org/apache/solr/parser/QueryParser.java
+++ b/solr/core/src/java/org/apache/solr/parser/QueryParser.java
@@ -13,7 +13,6 @@ import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.Query;
 import org.apache.solr.search.SyntaxError;
 import org.apache.solr.search.QParser;
-import org.apache.solr.search.QueryParserConfigurationException;
 
 
 public class QueryParser extends SolrQueryParserBase implements QueryParserConstants {
@@ -54,11 +53,11 @@ public class QueryParser extends SolrQueryParserBase implements QueryParserConst
   @Override
   protected Query newFieldQuery(Analyzer analyzer, String field, String queryText,
                                 boolean quoted, boolean fieldAutoGenPhraseQueries) throws SyntaxError {
-    if ((getAutoGeneratePhraseQueries() || fieldAutoGenPhraseQueries) && splitOnWhitespace == false) {
-      throw new QueryParserConfigurationException
-          ("Field '" + field + "': autoGeneratePhraseQueries == true is disallowed when sow/splitOnWhitespace == false");
-    }
-    return super.newFieldQuery(analyzer, field, queryText, quoted, fieldAutoGenPhraseQueries);
+    setAutoGenerateMultiTermSynonymsPhraseQuery(fieldAutoGenPhraseQueries || getAutoGeneratePhraseQueries());
+    // Don't auto-quote graph-aware field queries 
+    boolean treatAsQuoted = getSplitOnWhitespace()
+        ? (quoted || fieldAutoGenPhraseQueries || getAutoGeneratePhraseQueries()) : quoted;
+    return super.newFieldQuery(analyzer, field, queryText, treatAsQuoted, false);
   }
 
 // *   Query  ::= ( Clause )*

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0a689f4d/solr/core/src/java/org/apache/solr/parser/QueryParser.jj
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/parser/QueryParser.jj b/solr/core/src/java/org/apache/solr/parser/QueryParser.jj
index 5eb4ec7..d4d6539 100644
--- a/solr/core/src/java/org/apache/solr/parser/QueryParser.jj
+++ b/solr/core/src/java/org/apache/solr/parser/QueryParser.jj
@@ -37,7 +37,6 @@ import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.Query;
 import org.apache.solr.search.SyntaxError;
 import org.apache.solr.search.QParser;
-import org.apache.solr.search.QueryParserConfigurationException;
 
 
 public class QueryParser extends SolrQueryParserBase {
@@ -78,11 +77,11 @@ public class QueryParser extends SolrQueryParserBase {
   @Override
   protected Query newFieldQuery(Analyzer analyzer, String field, String queryText,
                                 boolean quoted, boolean fieldAutoGenPhraseQueries) throws SyntaxError {
-    if ((getAutoGeneratePhraseQueries() || fieldAutoGenPhraseQueries) && splitOnWhitespace == false) {
-      throw new QueryParserConfigurationException
-          ("Field '" + field + "': autoGeneratePhraseQueries == true is disallowed when sow/splitOnWhitespace == false");
-    }
-    return super.newFieldQuery(analyzer, field, queryText, quoted, fieldAutoGenPhraseQueries);
+    setAutoGenerateMultiTermSynonymsPhraseQuery(fieldAutoGenPhraseQueries || getAutoGeneratePhraseQueries());
+    // Don't auto-quote graph-aware field queries 
+    boolean treatAsQuoted = getSplitOnWhitespace()
+        ? (quoted || fieldAutoGenPhraseQueries || getAutoGeneratePhraseQueries()) : quoted;
+    return super.newFieldQuery(analyzer, field, queryText, treatAsQuoted, false);
   }
 }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0a689f4d/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParser.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParser.java b/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParser.java
index a4f9312..9825f72 100644
--- a/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParser.java
+++ b/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParser.java
@@ -310,8 +310,6 @@ public class ExtendedDismaxQParser extends QParser {
         up.setRemoveStopFilter(true);
         query = up.parse(mainUserQuery);          
       }
-    } catch (QueryParserConfigurationException e) {
-      throw e; // Don't ignore configuration exceptions
     } catch (Exception e) {
       // ignore failure and reparse later after escaping reserved chars
       up.exceptions = false;
@@ -1083,10 +1081,6 @@ public class ExtendedDismaxQParser extends QParser {
     @Override
     protected Query newFieldQuery(Analyzer analyzer, String field, String queryText, 
                                   boolean quoted, boolean fieldAutoGenPhraseQueries) throws SyntaxError {
-      if ((getAutoGeneratePhraseQueries() || fieldAutoGenPhraseQueries) && getSplitOnWhitespace() == false) {
-        throw new QueryParserConfigurationException
-            ("Field '" + field + "': autoGeneratePhraseQueries == true is disallowed when sow/splitOnWhitespace == false");
-      }
       Analyzer actualAnalyzer;
       if (removeStopFilter) {
         if (nonStopFilterAnalyzerPerField == null) {
@@ -1403,8 +1397,6 @@ public class ExtendedDismaxQParser extends QParser {
         }
         return null;
         
-      } catch (QueryParserConfigurationException e) {
-        throw e;  // Don't ignore configuration exceptions
       } catch (Exception e) {
         // an exception here is due to the field query not being compatible with the input text
         // for example, passing a string to a numeric field.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0a689f4d/solr/core/src/java/org/apache/solr/search/QueryParserConfigurationException.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/QueryParserConfigurationException.java b/solr/core/src/java/org/apache/solr/search/QueryParserConfigurationException.java
deleted file mode 100644
index 0dd2a33..0000000
--- a/solr/core/src/java/org/apache/solr/search/QueryParserConfigurationException.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.solr.search;
-
-public class QueryParserConfigurationException extends IllegalArgumentException {
-  public QueryParserConfigurationException(String message) {
-    super(message);
-  }
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0a689f4d/solr/core/src/test-files/solr/collection1/conf/synonyms.txt
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/synonyms.txt b/solr/core/src/test-files/solr/collection1/conf/synonyms.txt
index f64cb72..54cf2cc 100644
--- a/solr/core/src/test-files/solr/collection1/conf/synonyms.txt
+++ b/solr/core/src/test-files/solr/collection1/conf/synonyms.txt
@@ -31,3 +31,4 @@ pixima => pixma
 
 # multiword synonyms
 wi fi => wifi
+crow blackbird, grackle
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0a689f4d/solr/core/src/test/org/apache/solr/search/TestExtendedDismaxParser.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestExtendedDismaxParser.java b/solr/core/src/test/org/apache/solr/search/TestExtendedDismaxParser.java
index a887fed..c4d8bec 100644
--- a/solr/core/src/test/org/apache/solr/search/TestExtendedDismaxParser.java
+++ b/solr/core/src/test/org/apache/solr/search/TestExtendedDismaxParser.java
@@ -21,6 +21,7 @@ import java.util.HashSet;
 import java.util.Map;
 import java.util.Random;
 import java.util.Set;
+import java.util.stream.Stream;
 
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
@@ -34,7 +35,6 @@ import org.apache.solr.common.SolrException;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.schema.TextField;
 import org.apache.solr.util.SolrPluginUtils;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -66,7 +66,7 @@ public class TestExtendedDismaxParser extends SolrTestCaseJ4 {
                  "foo_i", "8"
     ));
     assertU(adoc("id", "47", "trait_ss", "Pig",
-            "text_sw", "line up and fly directly at the enemy death cannons, clogging them with wreckage!"));
+            "text", "line up and fly directly at the enemy death cannons, clogging them with wreckage!"));
     assertU(adoc("id", "48", "text_sw", "this has gigabyte potential", "foo_i","100"));
     assertU(adoc("id", "49", "text_sw", "start the big apple end", "foo_i","-100"));
     assertU(adoc("id", "50", "text_sw", "start new big city end"));
@@ -121,22 +121,22 @@ public class TestExtendedDismaxParser extends SolrTestCaseJ4 {
       // really just test that exceptions aren't thrown by
       // single + -
 
-      assertJQ(req("defType", "edismax", "q", "-", "df", "text_sw", "sow", sow)
+      assertJQ(req("defType", "edismax", "q", "-", "sow", sow)
           , "/response==");
 
-      assertJQ(req("defType", "edismax", "q", "+", "df", "text_sw", "sow", sow)
+      assertJQ(req("defType", "edismax", "q", "+", "sow", sow)
           , "/response==");
 
-      assertJQ(req("defType", "edismax", "q", "+ - +", "df", "text_sw", "sow", sow)
+      assertJQ(req("defType", "edismax", "q", "+ - +", "sow", sow)
           , "/response==");
 
-      assertJQ(req("defType", "edismax", "q", "- + -", "df", "text_sw", "sow", sow)
+      assertJQ(req("defType", "edismax", "q", "- + -", "sow", sow)
           , "/response==");
 
-      assertJQ(req("defType", "edismax", "q", "id:47 +", "df", "text_sw", "sow", sow)
+      assertJQ(req("defType", "edismax", "q", "id:47 +", "sow", sow)
           , "/response/numFound==1");
 
-      assertJQ(req("defType", "edismax", "q", "id:47 -", "df", "text_sw", "sow", sow)
+      assertJQ(req("defType", "edismax", "q", "id:47 -", "sow", sow)
           , "/response/numFound==1");
 
       Random r = random();
@@ -152,7 +152,7 @@ public class TestExtendedDismaxParser extends SolrTestCaseJ4 {
         }
 
         String q = sb.toString();
-        assertJQ(req("defType", "edismax", "q", q, "df", "text_sw", "sow", sow)
+        assertJQ(req("defType", "edismax", "q", q, "sow", sow)
             , "/response==");
       }
     }
@@ -264,7 +264,7 @@ public class TestExtendedDismaxParser extends SolrTestCaseJ4 {
            , twor
            );
    
-   assertQ(req("defType", "edismax", "qf", "name title subject text_sw",
+   assertQ(req("defType", "edismax", "qf", "name title subject text",
                "q","op"), twor
     );
    assertQ(req("defType", "edismax", 
@@ -277,29 +277,29 @@ public class TestExtendedDismaxParser extends SolrTestCaseJ4 {
                "q.op", "OR",
                "q","Order op"), twor
     );
-   assertQ(req("defType", "edismax", "qf", "name title subject text_sw",
+   assertQ(req("defType", "edismax", "qf", "name title subject text",
                "q","Order AND op"), oner
     );
-   assertQ(req("defType", "edismax", "qf", "name title subject text_sw",
+   assertQ(req("defType", "edismax", "qf", "name title subject text",
                "q","Order and op"), oner
     );
-    assertQ(req("defType", "edismax", "qf", "name title subject text_sw",
+    assertQ(req("defType", "edismax", "qf", "name title subject text",
                "q","+Order op"), oner
     );
-    assertQ(req("defType", "edismax", "qf", "name title subject text_sw",
+    assertQ(req("defType", "edismax", "qf", "name title subject text",
                "q","Order OR op"), twor
     );
-    assertQ(req("defType", "edismax", "qf", "name title subject text_sw",
+    assertQ(req("defType", "edismax", "qf", "name title subject text",
                "q","Order or op"), twor
     );
-    assertQ(req("defType", "edismax", "qf", "name title subject text_sw",
+    assertQ(req("defType", "edismax", "qf", "name title subject text",
                "q","*:*"), allr
     );
 
-    assertQ(req("defType", "edismax", "qf", "name title subject text_sw",
+    assertQ(req("defType", "edismax", "qf", "name title subject text",
            "q","star OR (-star)"), allr
     );
-    assertQ(req("defType", "edismax", "qf", "name title subject text_sw",
+    assertQ(req("defType", "edismax", "qf", "name title subject text",
            "q","id:42 OR (-id:42)"), allr
     );
 
@@ -551,7 +551,7 @@ public class TestExtendedDismaxParser extends SolrTestCaseJ4 {
         "//str[@name='parsedquery_toString'][.='+(id:42)^5.0']");
     
     
-    assertQ(req("defType","edismax", "uf","-*", "q","cannons", "qf","text_sw"),
+    assertQ(req("defType","edismax", "uf","-*", "q","cannons", "qf","text"),
         oner);
     
     assertQ(req("defType","edismax", "uf","* -id", "q","42", "qf", "id"), oner);
@@ -885,7 +885,7 @@ public class TestExtendedDismaxParser extends SolrTestCaseJ4 {
         "*[count(//doc)=3]");
     assertQ(
         "Might be double-escaping a client-escaped colon", 
-        req("q", "text_sw:(theos OR thistokenhasa\\:preescapedcolon OR theou)", "defType", "edismax", "qf", "text_sw"),
+        req("q", "text_sw:(theos OR thistokenhasa\\:preescapedcolon OR theou)", "defType", "edismax", "qf", "text"),
         "*[count(//doc)=3]");    
     
   }
@@ -1047,56 +1047,56 @@ public class TestExtendedDismaxParser extends SolrTestCaseJ4 {
     // "line up and fly directly at the enemy death cannons, clogging them with wreckage!"
     assertQ("test default operator with mm (AND + 0% => 0 hits)",
         req("q", "(line notfound) OR notfound",
-            "qf", "text_sw",
+            "qf", "text",
             "q.op", "AND",
             "mm", "0%",
             "defType", "edismax")
         , "*[count(//doc)=0]");
     assertQ("test default operator with mm (OR + 0% => 1 hit)",
         req("q", "line notfound OR notfound",
-            "qf", "text_sw",
+            "qf", "text",
             "q.op", "OR",
             "mm", "0%",
             "defType", "edismax")
         , "*[count(//doc)=1]");
     assertQ("test default operator with mm (OR + 100% => 0 hits)",
         req("q", "line notfound OR notfound",
-            "qf", "text_sw",
+            "qf", "text",
             "q.op", "OR",
             "mm", "100%",
             "defType", "edismax")
         , "*[count(//doc)=0]");
     assertQ("test default operator with mm (OR + 35% => 1 hit)",
         req("q", "line notfound notfound2 OR notfound",
-            "qf", "text_sw",
+            "qf", "text",
             "q.op", "OR",
             "mm", "35%",
             "defType", "edismax")
         , "*[count(//doc)=1]");
     assertQ("test default operator with mm (OR + 75% => 0 hits)",
         req("q", "line notfound notfound2 OR notfound3",
-            "qf", "text_sw",
+            "qf", "text",
             "q.op", "OR",
             "mm", "75%",
             "defType", "edismax")
         , "*[count(//doc)=0]");
     assertQ("test default operator with mm (AND + 0% => 1 hit)",
         req("q", "(line enemy) OR notfound",
-            "qf", "text_sw",
+            "qf", "text",
             "q.op", "AND",
             "mm", "0%",
             "defType", "edismax")
         , "*[count(//doc)=1]");
     assertQ("test default operator with mm (AND + 50% => 1 hit)",
         req("q", "(line enemy) OR (line notfound) OR (death cannons) OR (death notfound)",
-            "qf", "text_sw",
+            "qf", "text",
             "q.op", "AND",
             "mm", "50%",
             "defType", "edismax")
         , "*[count(//doc)=1]");
     assertQ("test default operator with mm (AND + 75% => 0 hits)",
         req("q", "(line enemy) OR (line notfound) OR (death cannons) OR (death notfound)",
-            "qf", "text_sw",
+            "qf", "text",
             "q.op", "AND",
             "mm", "75%",
             "defType", "edismax")
@@ -1394,16 +1394,6 @@ public class TestExtendedDismaxParser extends SolrTestCaseJ4 {
     
   }
 
-  // LUCENE-7533
-  public void testSplitOnWhitespace_with_autoGeneratePhraseQueries() throws Exception {
-    assertTrue(((TextField)h.getCore().getLatestSchema().getField("text").getType()).getAutoGeneratePhraseQueries());
-
-    try (SolrQueryRequest req = req()) {
-      final QParser qparser = QParser.getParser("{!edismax sow=false fq=text}blah blah)", req);
-      expectThrows(IllegalArgumentException.class, qparser::getQuery);
-    }
-  }
-
   @Test
   public void testSplitOnWhitespace_Basic() throws Exception {
     // The "text_sw" field has synonyms loaded from synonyms.txt
@@ -1550,7 +1540,7 @@ public class TestExtendedDismaxParser extends SolrTestCaseJ4 {
         , "/response/numFound==0"
     );
     assertJQ(req("qf","text_sw title", "defType","edismax", "q","wi* fi", "sow","false")
-        , "/response/numFound==2"    // matches because wi* matches "wifi" in one doc and "with" in another
+        , "/response/numFound==1"    // matches because wi* matches "wifi"
     );
     assertJQ(req("qf","text_sw title", "defType","edismax", "q","w? fi", "sow","false")
         , "/response/numFound==0"
@@ -1720,7 +1710,7 @@ public class TestExtendedDismaxParser extends SolrTestCaseJ4 {
         , "/response/numFound==1"
     );
     assertJQ(req("qf","text_sw title", "defType","edismax", "q","AT* wi fi", "sow","false")
-        , "/response/numFound==2"
+        , "/response/numFound==1"
     );
     assertJQ(req("qf","text_sw title", "defType","edismax", "q","AT? wi fi", "sow","false")
         , "/response/numFound==1"
@@ -1750,7 +1740,7 @@ public class TestExtendedDismaxParser extends SolrTestCaseJ4 {
         , "/response/numFound==1"
     );
     assertJQ(req("qf","text_sw title", "defType","edismax", "q","wi fi AT*", "sow","false")
-        , "/response/numFound==2"
+        , "/response/numFound==1"
     );
     assertJQ(req("qf","text_sw title", "defType","edismax", "q","wi fi AT?", "sow","false")
         , "/response/numFound==1"
@@ -1765,11 +1755,74 @@ public class TestExtendedDismaxParser extends SolrTestCaseJ4 {
         , "/response/numFound==1"
     );
   }
-  
-  
+
+  public void testAutoGeneratePhraseQueries() throws Exception {
+    ModifiableSolrParams noSowParams = new ModifiableSolrParams();
+    ModifiableSolrParams sowFalseParams = new ModifiableSolrParams();
+    sowFalseParams.add("sow", "false");
+    ModifiableSolrParams sowTrueParams = new ModifiableSolrParams();
+    sowTrueParams.add("sow", "true");
+
+    // From synonyms.txt:
+    //
+    //     crow blackbird, grackle
+
+    try (SolrQueryRequest req = req(sowFalseParams)) {
+      QParser qParser = QParser.getParser("text:grackle", "edismax", req); // "text" has autoGeneratePhraseQueries="true"
+      Query q = qParser.getQuery();
+      assertEquals("+(text:\"crow blackbird\" text:grackl)", q.toString());
+    }
+    for (SolrParams params : Arrays.asList(noSowParams, sowTrueParams)) {
+      try (SolrQueryRequest req = req(params)) {
+        QParser qParser = QParser.getParser("text:grackle", "edismax", req);
+        Query q = qParser.getQuery();
+        assertEquals("+spanOr([spanNear([text:crow, text:blackbird], 0, true), text:grackl])", q.toString());
+      }
+    }
+    for (SolrParams params : Arrays.asList(noSowParams, sowTrueParams, sowFalseParams)) {
+      try (SolrQueryRequest req = req(params)) {
+        QParser qParser = QParser.getParser("text_sw:grackle", "edismax", req); // "text_sw" doesn't specify autoGeneratePhraseQueries => default false
+        Query q = qParser.getQuery();
+        assertEquals("+((+text_sw:crow +text_sw:blackbird) text_sw:grackl)", q.toString());
+      }
+    }
+
+    Stream.of(noSowParams, sowTrueParams, sowFalseParams).forEach(p->p.add("qf", "text text_sw"));
+
+    try (SolrQueryRequest req = req(sowFalseParams)) {
+      QParser qParser = QParser.getParser("grackle", "edismax", req);
+      Query q = qParser.getQuery();
+      assertEquals("+((text:\"crow blackbird\" text:grackl)"
+              + " | ((+text_sw:crow +text_sw:blackbird) text_sw:grackl))",
+          q.toString());
+
+      qParser = QParser.getParser("grackle wi fi", "edismax", req);
+      q = qParser.getQuery();
+      assertEquals("+(((text:\"crow blackbird\" text:grackl) text:wifi)"
+              + " | (((+text_sw:crow +text_sw:blackbird) text_sw:grackl) text_sw:wifi))",
+          q.toString());
+    }
+    
+    for (SolrParams params : Arrays.asList(noSowParams, sowTrueParams)) {
+      try (SolrQueryRequest req = req(params)) {
+        QParser qParser = QParser.getParser("grackle", "edismax", req);
+        Query q = qParser.getQuery();
+        assertEquals("+(spanOr([spanNear([text:crow, text:blackbird], 0, true), text:grackl])"
+                + " | ((+text_sw:crow +text_sw:blackbird) text_sw:grackl))",
+            q.toString());
+
+        qParser = QParser.getParser("grackle wi fi", "edismax", req);
+        q = qParser.getQuery();
+        assertEquals("+((spanOr([spanNear([text:crow, text:blackbird], 0, true), text:grackl])"
+            + " | ((+text_sw:crow +text_sw:blackbird) text_sw:grackl)) (text:wi | text_sw:wi) (text:fi | text_sw:fi))",
+            q.toString());
+      }
+    }
+  }
+
 
   private boolean containsClause(Query query, String field, String value,
-      int boost, boolean fuzzy) {
+                                 int boost, boolean fuzzy) {
 
     float queryBoost = 1f;
     if (query instanceof BoostQuery) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/0a689f4d/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java b/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java
index 92bd6c0..607f091 100644
--- a/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java
+++ b/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java
@@ -34,11 +34,12 @@ import org.apache.lucene.search.TermInSetQuery;
 import org.apache.lucene.search.TermQuery;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.common.params.MapSolrParams;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.core.SolrInfoMBean;
 import org.apache.solr.parser.QueryParser;
 import org.apache.solr.query.FilterQuery;
 import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.schema.TextField;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.noggit.ObjectBuilder;
@@ -573,16 +574,6 @@ public class TestSolrQueryParser extends SolrTestCaseJ4 {
     req.close();
   }
 
-  // LUCENE-7533
-  public void testSplitOnWhitespace_with_autoGeneratePhraseQueries() throws Exception {
-    assertTrue(((TextField)h.getCore().getLatestSchema().getField("text").getType()).getAutoGeneratePhraseQueries());
-    
-    try (SolrQueryRequest req = req()) {
-      final QParser qparser = QParser.getParser("{!lucene sow=false qf=text}blah blah", req);
-      expectThrows(QueryParserConfigurationException.class, qparser::getQuery);
-    }
-  }
-
   @Test
   public void testSplitOnWhitespace_Basic() throws Exception {
     // The "syn" field has synonyms loaded from synonyms.txt
@@ -969,4 +960,39 @@ public class TestSolrQueryParser extends SolrTestCaseJ4 {
         , "/response/numFound==1"
     );
   }
+
+  @Test
+  public void testAutoGeneratePhraseQueries() throws Exception {
+    ModifiableSolrParams noSowParams = new ModifiableSolrParams();
+    ModifiableSolrParams sowFalseParams = new ModifiableSolrParams();
+    sowFalseParams.add("sow", "false");
+    ModifiableSolrParams sowTrueParams = new ModifiableSolrParams();
+    sowTrueParams.add("sow", "true");
+
+    // From synonyms.txt:
+    //
+    //     crow blackbird, grackle
+    //
+    try (SolrQueryRequest req = req()) {
+
+      QParser qParser = QParser.getParser("text:grackle", req); // "text" has autoGeneratePhraseQueries="true"
+      qParser.setParams(sowFalseParams);
+      Query q = qParser.getQuery();
+      assertEquals("text:\"crow blackbird\" text:grackl", q.toString());
+
+      for (SolrParams params : Arrays.asList(noSowParams, sowTrueParams)) {
+        qParser = QParser.getParser("text:grackle", req);
+        qParser.setParams(params);
+        q = qParser.getQuery();
+        assertEquals("spanOr([spanNear([text:crow, text:blackbird], 0, true), text:grackl])", q.toString());
+      }
+
+      for (SolrParams params : Arrays.asList(noSowParams, sowTrueParams, sowFalseParams)) {
+        qParser = QParser.getParser("text_sw:grackle", req); // "text_sw" doesn't specify autoGeneratePhraseQueries => default false
+        qParser.setParams(params);
+        q = qParser.getQuery();
+        assertEquals("(+text_sw:crow +text_sw:blackbird) text_sw:grackl", q.toString());
+      }
+    }
+  }
 }
\ No newline at end of file


[43/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-9601: DIH Tika example is now minimal Only keep definitions and files required to show Tika-extraction in DIH

Posted by ab...@apache.org.
SOLR-9601: DIH Tika example is now minimal
Only keep definitions and files required to show Tika-extraction in DIH


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/b02626de
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/b02626de
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/b02626de

Branch: refs/heads/jira/solr-9959
Commit: b02626de5071c543eb6e8deea450266218238c9e
Parents: 580f6e9
Author: Alexandre Rafalovitch <ar...@apache.org>
Authored: Sat Apr 1 19:06:50 2017 -0400
Committer: Alexandre Rafalovitch <ar...@apache.org>
Committed: Sat Apr 1 19:06:50 2017 -0400

----------------------------------------------------------------------
 solr/CHANGES.txt                                |    2 +
 .../example-DIH/solr/tika/conf/admin-extra.html |   24 -
 .../solr/tika/conf/admin-extra.menu-bottom.html |   25 -
 .../solr/tika/conf/admin-extra.menu-top.html    |   25 -
 .../example-DIH/solr/tika/conf/managed-schema   |  910 +-----------
 .../example-DIH/solr/tika/conf/solrconfig.xml   | 1354 +-----------------
 .../solr/tika/conf/tika-data-config.xml         |   33 +-
 7 files changed, 72 insertions(+), 2301 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b02626de/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 3187dc3..99edab4 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -182,6 +182,8 @@ Other Changes
 
 * SOLR-7383: New DataImportHandler 'atom' example, replacing broken 'rss' example (Alexandre Rafalovitch)
 
+* SOLR-9601: Redone DataImportHandler 'tika' example, removing all unused and irrelevant definitions (Alexandre Rafalovitch)
+
 ==================  6.5.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b02626de/solr/example/example-DIH/solr/tika/conf/admin-extra.html
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/tika/conf/admin-extra.html b/solr/example/example-DIH/solr/tika/conf/admin-extra.html
deleted file mode 100644
index fecab20..0000000
--- a/solr/example/example-DIH/solr/tika/conf/admin-extra.html
+++ /dev/null
@@ -1,24 +0,0 @@
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<!-- The content of this page will be statically included into the top-
-right box of the cores overview page. Uncomment this as an example to 
-see there the content will show up.
-
-<img src="img/ico/construction.png"> This line will appear at the top-
-right box on collection1's Overview
--->

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b02626de/solr/example/example-DIH/solr/tika/conf/admin-extra.menu-bottom.html
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/tika/conf/admin-extra.menu-bottom.html b/solr/example/example-DIH/solr/tika/conf/admin-extra.menu-bottom.html
deleted file mode 100644
index 3359a46..0000000
--- a/solr/example/example-DIH/solr/tika/conf/admin-extra.menu-bottom.html
+++ /dev/null
@@ -1,25 +0,0 @@
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<!-- admin-extra.menu-bottom.html -->
-<!--
-<li>
-  <a href="#" style="background-image: url(img/ico/construction.png);">
-    LAST ITEM
-  </a>
-</li>
--->

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b02626de/solr/example/example-DIH/solr/tika/conf/admin-extra.menu-top.html
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/tika/conf/admin-extra.menu-top.html b/solr/example/example-DIH/solr/tika/conf/admin-extra.menu-top.html
deleted file mode 100644
index 0886cee..0000000
--- a/solr/example/example-DIH/solr/tika/conf/admin-extra.menu-top.html
+++ /dev/null
@@ -1,25 +0,0 @@
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<!-- admin-extra.menu-top.html -->
-<!--
-<li>
-  <a href="#" style="background-image: url(img/ico/construction.png);">
-    FIRST ITEM
-  </a>
-</li>
--->

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b02626de/solr/example/example-DIH/solr/tika/conf/managed-schema
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/tika/conf/managed-schema b/solr/example/example-DIH/solr/tika/conf/managed-schema
index 58b2a80..6d506f7 100644
--- a/solr/example/example-DIH/solr/tika/conf/managed-schema
+++ b/solr/example/example-DIH/solr/tika/conf/managed-schema
@@ -16,897 +16,39 @@
  limitations under the License.
 -->
 
-<!--  
- This is the Solr schema file. This file should be named "schema.xml" and
- should be in the conf directory under the solr home
- (i.e. ./solr/conf/schema.xml by default) 
- or located where the classloader for the Solr webapp can find it.
-
- This example schema is the recommended starting point for users.
- It should be kept correct and concise, usable out-of-the-box.
-
- For more information, on how to customize this file, please see
- http://wiki.apache.org/solr/SchemaXml
-
- PERFORMANCE NOTE: this schema includes many optional features and should not
- be used for benchmarking.  To improve performance one could
-  - set stored="false" for all fields possible (esp large fields) when you
-    only need to search on the field but don't need to return the original
-    value.
-  - set indexed="false" if you don't need to search on the field, but only
-    return the field as a result of searching on other indexed fields.
-  - remove all unneeded copyField statements
-  - for best index size and searching performance, set "index" to false
-    for all general text fields, use copyField to copy them to the
-    catchall "text" field, and use that for searching.
-  - For maximum indexing performance, use the ConcurrentUpdateSolrServer
-    java client.
-  - Remember to run the JVM in server mode, and use a higher logging level
-    that avoids logging every request
--->
-
 <schema name="example-DIH-tika" version="1.6">
-  <!-- attribute "name" is the name of this schema and is only used for display purposes.
-       version="x.y" is Solr's version number for the schema syntax and 
-       semantics.  It should not normally be changed by applications.
-
-       1.0: multiValued attribute did not exist, all fields are multiValued 
-            by nature
-       1.1: multiValued attribute introduced, false by default 
-       1.2: omitTermFreqAndPositions attribute introduced, true by default 
-            except for text fields.
-       1.3: removed optional field compress feature
-       1.4: autoGeneratePhraseQueries attribute introduced to drive QueryParser
-            behavior when a single string produces multiple tokens.  Defaults 
-            to off for version >= 1.4
-       1.5: omitNorms defaults to true for primitive field types 
-            (int, float, boolean, string...)
-       1.6: useDocValuesAsStored defaults to true.            
-     -->
-
-
-   <!-- Valid attributes for fields:
-     name: mandatory - the name for the field
-     type: mandatory - the name of a field type from the 
-       fieldTypes
-     indexed: true if this field should be indexed (searchable or sortable)
-     stored: true if this field should be retrievable
-     docValues: true if this field should have doc values. Doc values are
-       useful for faceting, grouping, sorting and function queries. Although not
-       required, doc values will make the index faster to load, more
-       NRT-friendly and more memory-efficient. They however come with some
-       limitations: they are currently only supported by StrField, UUIDField
-       and all Trie*Fields, and depending on the field type, they might
-       require the field to be single-valued, be required or have a default
-       value (check the documentation of the field type you're interested in
-       for more information)
-     multiValued: true if this field may contain multiple values per document
-     omitNorms: (expert) set to true to omit the norms associated with
-       this field (this disables length normalization and index-time
-       boosting for the field, and saves some memory).  Only full-text
-       fields or fields that need an index-time boost need norms.
-       Norms are omitted for primitive (non-analyzed) types by default.
-     termVectors: [false] set to true to store the term vector for a
-       given field.
-       When using MoreLikeThis, fields used for similarity should be
-       stored for best performance.
-     termPositions: Store position information with the term vector.  
-       This will increase storage costs.
-     termOffsets: Store offset information with the term vector. This 
-       will increase storage costs.
-     required: The field is required.  It will throw an error if the
-       value does not exist
-     default: a value that should be used if no value is specified
-       when adding a document.
-   -->
-
-   <!-- field names should consist of alphanumeric or underscore characters only and
-      not start with a digit.  This is not currently strictly enforced,
-      but other field names will not have first class support from all components
-      and back compatibility is not guaranteed.  Names with both leading and
-      trailing underscores (e.g. _version_) are reserved.
-   -->
-
-   <field name="title" type="text_general" indexed="true" stored="true" multiValued="true"/>
-   <field name="author" type="text_general" indexed="true" stored="true"/>
-   <field name="text" type="text_general" indexed="true" stored="false" multiValued="true"/>
-
-
-   <!-- Dynamic field definitions allow using convention over configuration
-       for fields via the specification of patterns to match field names. 
-       EXAMPLE:  name="*_i" will match any field ending in _i (like myid_i, z_i)
-       RESTRICTION: the glob-like pattern in the name attribute must have
-       a "*" only at the start or the end.  -->
-   
-   <dynamicField name="*_i"  type="int"    indexed="true"  stored="true"/>
-   <dynamicField name="*_is" type="int"    indexed="true"  stored="true"  multiValued="true"/>
-   <dynamicField name="*_s"  type="string"  indexed="true"  stored="true" />
-   <dynamicField name="*_ss" type="string"  indexed="true"  stored="true" multiValued="true"/>
-   <dynamicField name="*_l"  type="long"   indexed="true"  stored="true"/>
-   <dynamicField name="*_ls" type="long"   indexed="true"  stored="true"  multiValued="true"/>
-   <dynamicField name="*_t"  type="text_general"    indexed="true"  stored="true"/>
-   <dynamicField name="*_txt" type="text_general"   indexed="true"  stored="true" multiValued="true"/>
-   <dynamicField name="*_en"  type="text_en"    indexed="true"  stored="true" multiValued="true"/>
-   <dynamicField name="*_b"  type="boolean" indexed="true" stored="true"/>
-   <dynamicField name="*_bs" type="boolean" indexed="true" stored="true"  multiValued="true"/>
-   <dynamicField name="*_f"  type="float"  indexed="true"  stored="true"/>
-   <dynamicField name="*_fs" type="float"  indexed="true"  stored="true"  multiValued="true"/>
-   <dynamicField name="*_d"  type="double" indexed="true"  stored="true"/>
-   <dynamicField name="*_ds" type="double" indexed="true"  stored="true"  multiValued="true"/>
-
-   <!-- Type used to index the lat and lon components for the "location" FieldType -->
-   <dynamicField name="*_coordinate"  type="tdouble" indexed="true"  stored="false" />
-
-   <dynamicField name="*_dt"  type="date"    indexed="true"  stored="true"/>
-   <dynamicField name="*_dts" type="date"    indexed="true"  stored="true" multiValued="true"/>
-   <dynamicField name="*_p"  type="location" indexed="true" stored="true"/>
-
-   <!-- some trie-coded dynamic fields for faster range queries -->
-   <dynamicField name="*_ti" type="tint"    indexed="true"  stored="true"/>
-   <dynamicField name="*_tl" type="tlong"   indexed="true"  stored="true"/>
-   <dynamicField name="*_tf" type="tfloat"  indexed="true"  stored="true"/>
-   <dynamicField name="*_td" type="tdouble" indexed="true"  stored="true"/>
-   <dynamicField name="*_tdt" type="tdate"  indexed="true"  stored="true"/>
-
-   <dynamicField name="ignored_*" type="ignored" multiValued="true"/>
-   <dynamicField name="attr_*" type="text_general" indexed="true" stored="true" multiValued="true"/>
-
-   <dynamicField name="random_*" type="random" />
-
-   <!-- uncomment the following to ignore any fields that don't already match an existing 
-        field name or dynamic field, rather than reporting them as an error. 
-        alternately, change the type="ignored" to some other type e.g. "text" if you want 
-        unknown fields indexed and/or stored by default --> 
-   <!--dynamicField name="*" type="ignored" multiValued="true" /-->
-   
-
-
-
- <!-- Field to use to determine and enforce document uniqueness. 
-      Unless this field is marked with required="false", it will be a required field
-   -->
- <!-- <uniqueKey>id</uniqueKey> -->
-
- <!-- DEPRECATED: The defaultSearchField is consulted by various query parsers when
-  parsing a query string that isn't explicit about the field.  Machine (non-user)
-  generated queries are best made explicit, or they can use the "df" request parameter
-  which takes precedence over this.
-  Note: Un-commenting defaultSearchField will be insufficient if your request handler
-  in solrconfig.xml defines "df", which takes precedence. That would need to be removed.
- <defaultSearchField>text</defaultSearchField> -->
-
- <!-- DEPRECATED: The defaultOperator (AND|OR) is consulted by various query parsers
-  when parsing a query string to determine if a clause of the query should be marked as
-  required or optional, assuming the clause isn't already marked by some operator.
-  The default is OR, which is generally assumed so it is not a good idea to change it
-  globally here.  The "q.op" request parameter takes precedence over this.
- <solrQueryParser defaultOperator="OR"/> -->
-  
-    <!-- field type definitions. The "name" attribute is
-       just a label to be used by field definitions.  The "class"
-       attribute and any other attributes determine the real
-       behavior of the fieldType.
-         Class names starting with "solr" refer to java classes in a
-       standard package such as org.apache.solr.analysis
-    -->
-
-    <!-- The StrField type is not analyzed, but indexed/stored verbatim.
-       It supports doc values but in that case the field needs to be
-       single-valued and either required or have a default value.
-      -->
-    <fieldType name="string" class="solr.StrField" sortMissingLast="true" />
-
-    <!-- boolean type: "true" or "false" -->
-    <fieldType name="boolean" class="solr.BoolField" sortMissingLast="true"/>
-
-    <!-- sortMissingLast and sortMissingFirst attributes are optional attributes are
-         currently supported on types that are sorted internally as strings
-         and on numeric types.
-       This includes "string","boolean", and, as of 3.5 (and 4.x),
-       int, float, long, date, double, including the "Trie" variants.
-       - If sortMissingLast="true", then a sort on this field will cause documents
-         without the field to come after documents with the field,
-         regardless of the requested sort order (asc or desc).
-       - If sortMissingFirst="true", then a sort on this field will cause documents
-         without the field to come before documents with the field,
-         regardless of the requested sort order.
-       - If sortMissingLast="false" and sortMissingFirst="false" (the default),
-         then default lucene sorting will be used which places docs without the
-         field first in an ascending sort and last in a descending sort.
-    -->    
-
-    <!--
-      Default numeric field types. For faster range queries, consider the tint/tfloat/tlong/tdouble types.
-
-      These fields support doc values, but they require the field to be
-      single-valued and either be required or have a default value.
-    -->
-    <fieldType name="int" class="solr.TrieIntField" precisionStep="0" positionIncrementGap="0"/>
-    <fieldType name="float" class="solr.TrieFloatField" precisionStep="0" positionIncrementGap="0"/>
-    <fieldType name="long" class="solr.TrieLongField" precisionStep="0" positionIncrementGap="0"/>
-    <fieldType name="double" class="solr.TrieDoubleField" precisionStep="0" positionIncrementGap="0"/>
-
-    <!--
-     Numeric field types that index each value at various levels of precision
-     to accelerate range queries when the number of values between the range
-     endpoints is large. See the javadoc for NumericRangeQuery for internal
-     implementation details.
-
-     Smaller precisionStep values (specified in bits) will lead to more tokens
-     indexed per value, slightly larger index size, and faster range queries.
-     A precisionStep of 0 disables indexing at different precision levels.
-    -->
-    <fieldType name="tint" class="solr.TrieIntField" precisionStep="8" positionIncrementGap="0"/>
-    <fieldType name="tfloat" class="solr.TrieFloatField" precisionStep="8" positionIncrementGap="0"/>
-    <fieldType name="tlong" class="solr.TrieLongField" precisionStep="8" positionIncrementGap="0"/>
-    <fieldType name="tdouble" class="solr.TrieDoubleField" precisionStep="8" positionIncrementGap="0"/>
-
-    <!-- The format for this date field is of the form 1995-12-31T23:59:59Z, and
-         is a more restricted form of the canonical representation of dateTime
-         http://www.w3.org/TR/xmlschema-2/#dateTime    
-         The trailing "Z" designates UTC time and is mandatory.
-         Optional fractional seconds are allowed: 1995-12-31T23:59:59.999Z
-         All other components are mandatory.
-
-         Expressions can also be used to denote calculations that should be
-         performed relative to "NOW" to determine the value, ie...
-
-               NOW/HOUR
-                  ... Round to the start of the current hour
-               NOW-1DAY
-                  ... Exactly 1 day prior to now
-               NOW/DAY+6MONTHS+3DAYS
-                  ... 6 months and 3 days in the future from the start of
-                      the current day
-                      
-         Consult the TrieDateField javadocs for more information.
-
-         Note: For faster range queries, consider the tdate type
-      -->
-    <fieldType name="date" class="solr.TrieDateField" precisionStep="0" positionIncrementGap="0"/>
 
-    <!-- A Trie based date field for faster date range queries and date faceting. -->
-    <fieldType name="tdate" class="solr.TrieDateField" precisionStep="6" positionIncrementGap="0"/>
+  <uniqueKey>id</uniqueKey>
 
+  <field name="id" type="string" indexed="true" stored="true"/>
+  <field name="author" type="text_simple" indexed="true" stored="true"/>
+  <field name="title" type="text_simple" indexed="true" stored="true" multiValued="true"/>
+  <field name="format" type="string" indexed="true" stored="true"/>
 
-    <!--Binary data type. The data should be sent/retrieved in as Base64 encoded Strings -->
-    <fieldType name="binary" class="solr.BinaryField"/>
+  <!-- field "text" is searchable (it is the default search field) but it is not stored to save space -->
+  <field name="text" type="text_simple" indexed="true" stored="false" multiValued="true"/>
 
-    <!-- The "RandomSortField" is not used to store or search any
-         data.  You can declare fields of this type it in your schema
-         to generate pseudo-random orderings of your docs for sorting 
-         or function purposes.  The ordering is generated based on the field
-         name and the version of the index. As long as the index version
-         remains unchanged, and the same field name is reused,
-         the ordering of the docs will be consistent.  
-         If you want different psuedo-random orderings of documents,
-         for the same version of the index, use a dynamicField and
-         change the field name in the request.
-     -->
-    <fieldType name="random" class="solr.RandomSortField" indexed="true" />
 
-    <!-- solr.TextField allows the specification of custom text analyzers
-         specified as a tokenizer and a list of token filters. Different
-         analyzers may be specified for indexing and querying.
-
-         The optional positionIncrementGap puts space between multiple fields of
-         this type on the same document, with the purpose of preventing false phrase
-         matching across fields.
-
-         For more info on customizing your analyzer chain, please see
-         http://wiki.apache.org/solr/AnalyzersTokenizersTokenFilters
-     -->
-
-    <!-- One can also specify an existing Analyzer class that has a
-         default constructor via the class attribute on the analyzer element.
-         Example:
-    <fieldType name="text_greek" class="solr.TextField">
-      <analyzer class="org.apache.lucene.analysis.el.GreekAnalyzer"/>
-    </fieldType>
-    -->
-
-    <!-- A text field that only splits on whitespace for exact matching of words -->
-    <fieldType name="text_ws" class="solr.TextField" positionIncrementGap="100">
-      <analyzer>
-        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-      </analyzer>
-    </fieldType>
-
-    <!-- A general text field that has reasonable, generic
-         cross-language defaults: it tokenizes with StandardTokenizer,
-         and down cases. -->
-    <fieldType name="text_general" class="solr.TextField" positionIncrementGap="100">
-      <analyzer type="index">
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-      </analyzer>
-      <analyzer type="query">
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-      </analyzer>
-    </fieldType>
-
-    <!-- A text field with defaults appropriate for English: it
-         tokenizes with StandardTokenizer, down cases, and
-         finally applies Porter's stemming. -->
-    <fieldType name="text_en" class="solr.TextField" positionIncrementGap="100">
-      <analyzer type="index">
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-  <filter class="solr.EnglishPossessiveFilterFactory"/>
-  <!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
-        <filter class="solr.EnglishMinimalStemFilterFactory"/>
-  -->
-        <filter class="solr.PorterStemFilterFactory"/>
-      </analyzer>
-      <analyzer type="query">
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-  <filter class="solr.EnglishPossessiveFilterFactory"/>
-  <!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
-        <filter class="solr.EnglishMinimalStemFilterFactory"/>
+  <!-- Uncomment the dynamicField definition to catch any other fields
+   that may have been declared in the DIH configuration.
+   This allows to speed up prototyping.
   -->
-        <filter class="solr.PorterStemFilterFactory"/>
-      </analyzer>
-    </fieldType>
-
-    <!-- A text field with defaults appropriate for English, plus
-   aggressive word-splitting and autophrase features enabled.
-   This field is just like text_en, except it adds
-   WordDelimiterGraphFilter to enable splitting and matching of
-   words on case-change, alpha numeric boundaries, and
-   non-alphanumeric chars.  This means certain compound word
-   cases will work, for example query "wi fi" will match
-   document "WiFi" or "wi-fi".
-        -->
-    <fieldType name="text_en_splitting" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
-      <analyzer type="index">
-        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.PorterStemFilterFactory"/>
-        <filter class="solr.FlattenGraphFilterFactory" />
-      </analyzer>
-      <analyzer type="query">
-        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.PorterStemFilterFactory"/>
-      </analyzer>
-    </fieldType>
-
-    <!-- Less flexible matching, but less false matches.  Probably not ideal for product names,
-         but may be good for SKUs.  Can insert dashes in the wrong place and still match. -->
-    <fieldType name="text_en_splitting_tight" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
-      <analyzer type="index">
-        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.EnglishMinimalStemFilterFactory"/>
-        <!-- this filter can remove any duplicate tokens that appear at the same position - sometimes
-             possible with WordDelimiterGraphFilter in conjuncton with stemming. -->
-        <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
-        <filter class="solr.FlattenGraphFilterFactory" />
-      </analyzer>
-      <analyzer type="query">
-        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.EnglishMinimalStemFilterFactory"/>
-        <!-- this filter can remove any duplicate tokens that appear at the same position - sometimes
-             possible with WordDelimiterGraphFilter in conjuncton with stemming. -->
-        <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
-      </analyzer>
-    </fieldType>
-
-    <!-- Just like text_general except it reverses the characters of
-   each token, to enable more efficient leading wildcard queries. -->
-    <fieldType name="text_general_rev" class="solr.TextField" positionIncrementGap="100">
-      <analyzer type="index">
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.ReversedWildcardFilterFactory" withOriginal="true"
-           maxPosAsterisk="3" maxPosQuestion="2" maxFractionAsterisk="0.33"/>
-      </analyzer>
-      <analyzer type="query">
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-      </analyzer>
-    </fieldType>
-
-    <!-- charFilter + WhitespaceTokenizer  -->
-    <!--
-    <fieldType name="text_char_norm" class="solr.TextField" positionIncrementGap="100" >
-      <analyzer>
-        <charFilter class="solr.MappingCharFilterFactory" mapping="mapping-ISOLatin1Accent.txt"/>
-        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-      </analyzer>
-    </fieldType>
-    -->
-
-    <!-- This is an example of using the KeywordTokenizer along
-         With various TokenFilterFactories to produce a sortable field
-         that does not include some properties of the source text
-      -->
-    <fieldType name="alphaOnlySort" class="solr.TextField" sortMissingLast="true" omitNorms="true">
-      <analyzer>
-        <!-- KeywordTokenizer does no actual tokenizing, so the entire
-             input string is preserved as a single token
-          -->
-        <tokenizer class="solr.KeywordTokenizerFactory"/>
-        <!-- The LowerCase TokenFilter does what you expect, which can be
-             when you want your sorting to be case insensitive
-          -->
-        <filter class="solr.LowerCaseFilterFactory" />
-        <!-- The TrimFilter removes any leading or trailing whitespace -->
-        <filter class="solr.TrimFilterFactory" />
-        <!-- The PatternReplaceFilter gives you the flexibility to use
-             Java Regular expression to replace any sequence of characters
-             matching a pattern with an arbitrary replacement string, 
-             which may include back references to portions of the original
-             string matched by the pattern.
-             
-             See the Java Regular Expression documentation for more
-             information on pattern and replacement string syntax.
-             
-             http://docs.oracle.com/javase/7/docs/api/java/util/regex/package-summary.html
-          -->
-        <filter class="solr.PatternReplaceFilterFactory"
-                pattern="([^a-z])" replacement="" replace="all"
-        />
-      </analyzer>
-    </fieldType>
-    
-    <fieldType name="phonetic" stored="false" indexed="true" class="solr.TextField" >
-      <analyzer>
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.DoubleMetaphoneFilterFactory" inject="false"/>
-      </analyzer>
-    </fieldType>
-
-    <fieldType name="payloads" stored="false" indexed="true" class="solr.TextField" >
-      <analyzer>
-        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <!--
-        The DelimitedPayloadTokenFilter can put payloads on tokens... for example,
-        a token of "foo|1.4"  would be indexed as "foo" with a payload of 1.4f
-        Attributes of the DelimitedPayloadTokenFilterFactory : 
-         "delimiter" - a one character delimiter. Default is | (pipe)
-   "encoder" - how to encode the following value into a playload
-      float -> org.apache.lucene.analysis.payloads.FloatEncoder,
-      integer -> o.a.l.a.p.IntegerEncoder
-      identity -> o.a.l.a.p.IdentityEncoder
-            Fully Qualified class name implementing PayloadEncoder, Encoder must have a no arg constructor.
-         -->
-        <filter class="solr.DelimitedPayloadTokenFilterFactory" encoder="float"/>
-      </analyzer>
-    </fieldType>
-
-    <!-- lowercases the entire field value, keeping it as a single token.  -->
-    <fieldType name="lowercase" class="solr.TextField" positionIncrementGap="100">
-      <analyzer>
-        <tokenizer class="solr.KeywordTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory" />
-      </analyzer>
-    </fieldType>
-
-    <!-- 
-      Example of using PathHierarchyTokenizerFactory at index time, so
-      queries for paths match documents at that path, or in descendent paths
-    -->
-    <fieldType name="descendent_path" class="solr.TextField">
-      <analyzer type="index">
-  <tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
-      </analyzer>
-      <analyzer type="query">
-  <tokenizer class="solr.KeywordTokenizerFactory" />
-      </analyzer>
-    </fieldType>
-    <!-- 
-      Example of using PathHierarchyTokenizerFactory at query time, so
-      queries for paths match documents at that path, or in ancestor paths
-    -->
-    <fieldType name="ancestor_path" class="solr.TextField">
-      <analyzer type="index">
-  <tokenizer class="solr.KeywordTokenizerFactory" />
-      </analyzer>
-      <analyzer type="query">
-  <tokenizer class="solr.PathHierarchyTokenizerFactory" delimiter="/" />
-      </analyzer>
-    </fieldType>
-
-    <!-- since fields of this type are by default not stored or indexed,
-         any data added to them will be ignored outright.  --> 
-    <fieldType name="ignored" stored="false" indexed="false" multiValued="true" class="solr.StrField" />
-
-    <!-- This point type indexes the coordinates as separate fields (subFields)
-      If subFieldType is defined, it references a type, and a dynamic field
-      definition is created matching *___<typename>.  Alternately, if 
-      subFieldSuffix is defined, that is used to create the subFields.
-      Example: if subFieldType="double", then the coordinates would be
-        indexed in fields myloc_0___double,myloc_1___double.
-      Example: if subFieldSuffix="_d" then the coordinates would be indexed
-        in fields myloc_0_d,myloc_1_d
-      The subFields are an implementation detail of the fieldType, and end
-      users normally should not need to know about them.
-     -->
-    <fieldType name="point" class="solr.PointType" dimension="2" subFieldSuffix="_d"/>
+  <!-- <dynamicField name="*" type="string" indexed="true" stored="true" multiValued="true"/> -->
 
-    <!-- A specialized field for geospatial search. If indexed, this fieldType must not be multivalued. -->
-    <fieldType name="location" class="solr.LatLonType" subFieldSuffix="_coordinate"/>
+  <!-- The StrField type is not analyzed, but is indexed/stored verbatim. -->
+  <fieldType name="string" class="solr.StrField" sortMissingLast="true"/>
 
-    <!-- An alternative geospatial field type new to Solr 4.  It supports multiValued and polygon shapes.
-      For more information about this and other Spatial fields new to Solr 4, see:
-      http://wiki.apache.org/solr/SolrAdaptersForLuceneSpatial4
-    -->
-    <fieldType name="location_rpt" class="solr.SpatialRecursivePrefixTreeFieldType"
-        geo="true" distErrPct="0.025" maxDistErr="0.001" distanceUnits="kilometers" />
 
-
-
-
-   <!-- some examples for different languages (generally ordered by ISO code) -->
-
-    <!-- Arabic -->
-    <fieldType name="text_ar" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <!-- for any non-arabic -->
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <!-- normalizes \ufeef to \ufef1, etc -->
-        <filter class="solr.ArabicNormalizationFilterFactory"/>
-        <filter class="solr.ArabicStemFilterFactory"/>
-      </analyzer>
-    </fieldType>
-
-    <!-- Bulgarian -->
-    <fieldType name="text_bg" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/> 
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.BulgarianStemFilterFactory"/>
-      </analyzer>
-    </fieldType>
-    
-    <!-- Catalan -->
-    <fieldType name="text_ca" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.SnowballPorterFilterFactory" language="Catalan"/>
-      </analyzer>
-    </fieldType>
-    
-    <!-- CJK bigram (see text_ja for a Japanese configuration using morphological analysis) -->
-    <fieldType name="text_cjk" class="solr.TextField" positionIncrementGap="100">
-      <analyzer>
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <!-- normalize width before bigram, as e.g. half-width dakuten combine  -->
-        <filter class="solr.CJKWidthFilterFactory"/>
-        <!-- for any non-CJK -->
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.CJKBigramFilterFactory"/>
-      </analyzer>
-    </fieldType>
-
-    <!-- Kurdish -->
-    <fieldType name="text_ckb" class="solr.TextField" positionIncrementGap="100">
-      <analyzer>
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.SoraniNormalizationFilterFactory"/>
-        <!-- for any latin text -->
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.SoraniStemFilterFactory"/>
-      </analyzer>
-    </fieldType>
-
-    <!-- Czech -->
-    <fieldType name="text_cz" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.CzechStemFilterFactory"/>
-      </analyzer>
-    </fieldType>
-    
-    <!-- Danish -->
-    <fieldType name="text_da" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.SnowballPorterFilterFactory" language="Danish"/>
-      </analyzer>
-    </fieldType>
-    
-    <!-- German -->
-    <fieldType name="text_de" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.GermanNormalizationFilterFactory"/>
-        <filter class="solr.GermanLightStemFilterFactory"/>
-        <!-- less aggressive: <filter class="solr.GermanMinimalStemFilterFactory"/> -->
-        <!-- more aggressive: <filter class="solr.SnowballPorterFilterFactory" language="German2"/> -->
-      </analyzer>
-    </fieldType>
-    
-    <!-- Greek -->
-    <fieldType name="text_el" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <!-- greek specific lowercase for sigma -->
-        <filter class="solr.GreekLowerCaseFilterFactory"/>
-        <filter class="solr.GreekStemFilterFactory"/>
-      </analyzer>
-    </fieldType>
-    
-    <!-- Spanish -->
-    <fieldType name="text_es" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.SpanishLightStemFilterFactory"/>
-        <!-- more aggressive: <filter class="solr.SnowballPorterFilterFactory" language="Spanish"/> -->
-      </analyzer>
-    </fieldType>
-    
-    <!-- Basque -->
-    <fieldType name="text_eu" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.SnowballPorterFilterFactory" language="Basque"/>
-      </analyzer>
-    </fieldType>
-    
-    <!-- Persian -->
-    <fieldType name="text_fa" class="solr.TextField" positionIncrementGap="100">
-      <analyzer>
-        <!-- for ZWNJ -->
-        <charFilter class="solr.PersianCharFilterFactory"/>
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.ArabicNormalizationFilterFactory"/>
-        <filter class="solr.PersianNormalizationFilterFactory"/>
-      </analyzer>
-    </fieldType>
-    
-    <!-- Finnish -->
-    <fieldType name="text_fi" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.SnowballPorterFilterFactory" language="Finnish"/>
-        <!-- less aggressive: <filter class="solr.FinnishLightStemFilterFactory"/> -->
-      </analyzer>
-    </fieldType>
-    
-    <!-- French -->
-    <fieldType name="text_fr" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.FrenchLightStemFilterFactory"/>
-        <!-- less aggressive: <filter class="solr.FrenchMinimalStemFilterFactory"/> -->
-        <!-- more aggressive: <filter class="solr.SnowballPorterFilterFactory" language="French"/> -->
-      </analyzer>
-    </fieldType>
-    
-    <!-- Irish -->
-    <fieldType name="text_ga" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.IrishLowerCaseFilterFactory"/>
-        <filter class="solr.SnowballPorterFilterFactory" language="Irish"/>
-      </analyzer>
-    </fieldType>
-    
-    <!-- Galician -->
-    <fieldType name="text_gl" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.GalicianStemFilterFactory"/>
-        <!-- less aggressive: <filter class="solr.GalicianMinimalStemFilterFactory"/> -->
-      </analyzer>
-    </fieldType>
-    
-    <!-- Hindi -->
-    <fieldType name="text_hi" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <!-- normalizes unicode representation -->
-        <filter class="solr.IndicNormalizationFilterFactory"/>
-        <!-- normalizes variation in spelling -->
-        <filter class="solr.HindiNormalizationFilterFactory"/>
-        <filter class="solr.HindiStemFilterFactory"/>
-      </analyzer>
-    </fieldType>
-    
-    <!-- Hungarian -->
-    <fieldType name="text_hu" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.SnowballPorterFilterFactory" language="Hungarian"/>
-        <!-- less aggressive: <filter class="solr.HungarianLightStemFilterFactory"/> -->   
-      </analyzer>
-    </fieldType>
-    
-    <!-- Armenian -->
-    <fieldType name="text_hy" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.SnowballPorterFilterFactory" language="Armenian"/>
-      </analyzer>
-    </fieldType>
-    
-    <!-- Indonesian -->
-    <fieldType name="text_id" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <!-- for a less aggressive approach (only inflectional suffixes), set stemDerivational to false -->
-        <filter class="solr.IndonesianStemFilterFactory" stemDerivational="true"/>
-      </analyzer>
-    </fieldType>
-    
-    <!-- Italian -->
-    <fieldType name="text_it" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.ItalianLightStemFilterFactory"/>
-        <!-- more aggressive: <filter class="solr.SnowballPorterFilterFactory" language="Italian"/> -->
-      </analyzer>
-    </fieldType>
-    
-    <!-- Japanese using morphological analysis (see text_cjk for a configuration using bigramming)
-
-         NOTE: If you want to optimize search for precision, use default operator AND in your query
-         parser config with <solrQueryParser defaultOperator="AND"/> further down in this file.  Use 
-         OR if you would like to optimize for recall (default).
-    -->
-    <fieldType name="text_ja" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="false">
-      <analyzer>
-      <!-- Kuromoji Japanese morphological analyzer/tokenizer (JapaneseTokenizer)
-
-           Kuromoji has a search mode (default) that does segmentation useful for search.  A heuristic
-           is used to segment compounds into its parts and the compound itself is kept as synonym.
-
-           Valid values for attribute mode are:
-              normal: regular segmentation
-              search: segmentation useful for search with synonyms compounds (default)
-            extended: same as search mode, but unigrams unknown words (experimental)
-
-           For some applications it might be good to use search mode for indexing and normal mode for
-           queries to reduce recall and prevent parts of compounds from being matched and highlighted.
-           Use <analyzer type="index"> and <analyzer type="query"> for this and mode normal in query.
-
-           Kuromoji also has a convenient user dictionary feature that allows overriding the statistical
-           model with your own entries for segmentation, part-of-speech tags and readings without a need
-           to specify weights.  Notice that user dictionaries have not been subject to extensive testing.
-
-           User dictionary attributes are:
-                     userDictionary: user dictionary filename
-             userDictionaryEncoding: user dictionary encoding (default is UTF-8)
-
-           Punctuation characters are discarded by default.  Use discardPunctuation="false" to keep them.
-
-           See http://wiki.apache.org/solr/JapaneseLanguageSupport for more on Japanese language support.
-        -->
-        <tokenizer class="solr.JapaneseTokenizerFactory" mode="search"/>
-        <!-- Reduces inflected verbs and adjectives to their base/dictionary forms (\u8f9e\u66f8\u5f62) -->
-        <filter class="solr.JapaneseBaseFormFilterFactory"/>
-        <!-- Normalizes full-width romaji to half-width and half-width kana to full-width (Unicode NFKC subset) -->
-        <filter class="solr.CJKWidthFilterFactory"/>
-        <!-- Normalizes common katakana spelling variations by removing any last long sound character (U+30FC) -->
-        <filter class="solr.JapaneseKatakanaStemFilterFactory" minimumLength="4"/>
-        <!-- Lower-cases romaji characters -->
-        <filter class="solr.LowerCaseFilterFactory"/>
-      </analyzer>
-    </fieldType>
-    
-    <!-- Latvian -->
-    <fieldType name="text_lv" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.LatvianStemFilterFactory"/>
-      </analyzer>
-    </fieldType>
-    
-    <!-- Dutch -->
-    <fieldType name="text_nl" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.SnowballPorterFilterFactory" language="Dutch"/>
-      </analyzer>
-    </fieldType>
-    
-    <!-- Norwegian -->
-    <fieldType name="text_no" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.SnowballPorterFilterFactory" language="Norwegian"/>
-        <!-- less aggressive: <filter class="solr.NorwegianLightStemFilterFactory" variant="nb"/> -->
-        <!-- singular/plural: <filter class="solr.NorwegianMinimalStemFilterFactory" variant="nb"/> -->
-        <!-- The "light" and "minimal" stemmers support variants: nb=Bokmål, nn=Nynorsk, no=Both -->
-      </analyzer>
-    </fieldType>
-    
-    <!-- Portuguese -->
-    <fieldType name="text_pt" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.PortugueseLightStemFilterFactory"/>
-        <!-- less aggressive: <filter class="solr.PortugueseMinimalStemFilterFactory"/> -->
-        <!-- more aggressive: <filter class="solr.SnowballPorterFilterFactory" language="Portuguese"/> -->
-        <!-- most aggressive: <filter class="solr.PortugueseStemFilterFactory"/> -->
-      </analyzer>
-    </fieldType>
-    
-    <!-- Romanian -->
-    <fieldType name="text_ro" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.SnowballPorterFilterFactory" language="Romanian"/>
-      </analyzer>
-    </fieldType>
-    
-    <!-- Russian -->
-    <fieldType name="text_ru" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.SnowballPorterFilterFactory" language="Russian"/>
-        <!-- less aggressive: <filter class="solr.RussianLightStemFilterFactory"/> -->
-      </analyzer>
-    </fieldType>
-    
-    <!-- Swedish -->
-    <fieldType name="text_sv" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-        <filter class="solr.SnowballPorterFilterFactory" language="Swedish"/>
-        <!-- less aggressive: <filter class="solr.SwedishLightStemFilterFactory"/> -->
-      </analyzer>
-    </fieldType>
-    
-    <!-- Thai -->
-    <fieldType name="text_th" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.ThaiTokenizerFactory"/>
-        <filter class="solr.LowerCaseFilterFactory"/>
-      </analyzer>
-    </fieldType>
-    
-    <!-- Turkish -->
-    <fieldType name="text_tr" class="solr.TextField" positionIncrementGap="100">
-      <analyzer> 
-        <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.ApostropheFilterFactory"/>
-        <filter class="solr.TurkishLowerCaseFilterFactory"/>
-        <filter class="solr.SnowballPorterFilterFactory" language="Turkish"/>
-      </analyzer>
-    </fieldType>
-  
-  <!-- Similarity is the scoring routine for each document vs. a query.
-       A custom Similarity or SimilarityFactory may be specified here, but 
-       the default is fine for most applications.  
-       For more info: http://wiki.apache.org/solr/SchemaXml#Similarity
-    -->
-  <!--
-     <similarity class="com.example.solr.CustomSimilarityFactory">
-       <str name="paramkey">param value</str>
-     </similarity>
-    -->
-
-</schema>
+  <!-- A basic text field that has reasonable, generic
+   cross-language defaults: it tokenizes with StandardTokenizer,
+   and down cases. It does not deal with stopwords or other issues.
+   See other examples for alternative definitions.
+  -->
+  <fieldType name="text_simple" class="solr.TextField" positionIncrementGap="100">
+    <analyzer>
+      <tokenizer class="solr.StandardTokenizerFactory"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+    </analyzer>
+  </fieldType>
+
+</schema>
\ No newline at end of file


[31/52] [abbrv] lucene-solr:jira/solr-9959: LUCENE-7753: Make fields static when possible.

Posted by ab...@apache.org.
LUCENE-7753: Make fields static when possible.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/e80643e5
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/e80643e5
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/e80643e5

Branch: refs/heads/jira/solr-9959
Commit: e80643e5a77297ba7ee29c7297e37af6ffb73ac2
Parents: 23c6ea2
Author: Adrien Grand <jp...@gmail.com>
Authored: Fri Mar 31 16:22:45 2017 +0200
Committer: Adrien Grand <jp...@gmail.com>
Committed: Fri Mar 31 17:28:21 2017 +0200

----------------------------------------------------------------------
 lucene/CHANGES.txt                              |   3 +
 .../byTask/feeds/EnwikiContentSourceTest.java   |   4 +-
 .../apache/lucene/index/TestCustomNorms.java    |  14 +-
 .../test/org/apache/lucene/index/TestNorms.java |  12 +-
 .../lucene/index/TestSameTokenSamePosition.java |   4 +-
 .../lucene/search/TestAutomatonQuery.java       |   2 +-
 .../search/TestAutomatonQueryUnicode.java       |   2 +-
 .../TestEarlyTerminatingSortingCollector.java   |  12 +-
 .../apache/lucene/search/TestRegexpQuery.java   |   2 +-
 .../lucene/search/TestSearchWithThreads.java    |   2 +-
 .../apache/lucene/store/TestRAMDirectory.java   |  26 ++--
 .../grouping/DistinctValuesCollectorTest.java   |  48 +++----
 .../uhighlight/LengthGoalBreakIteratorTest.java |  26 ++--
 .../vectorhighlight/AbstractTestCase.java       |   6 +-
 .../surround/parser/QueryParser.java            |  41 +++---
 .../queryparser/surround/parser/QueryParser.jj  |  45 +++----
 .../queryparser/surround/query/FieldsQuery.java |   4 +-
 .../lucene/spatial3d/geom/GeoBBoxTest.java      |   2 +-
 .../mockrandom/MockRandomPostingsFormat.java    |   2 +-
 .../request/AnalyticsContentHandler.java        |  44 +++---
 .../handler/dataimport/TestDocBuilder2.java     |  48 +++----
 .../dataimport/TestHierarchicalDocBuilder.java  |  16 +--
 .../org/apache/solr/cloud/DistributedMap.java   |  10 +-
 .../apache/solr/cloud/OverseerTaskQueue.java    |   6 +-
 .../org/apache/solr/schema/CurrencyField.java   |   2 +-
 .../org/apache/solr/update/CommitTracker.java   |   6 +-
 .../org/apache/solr/util/SimplePostTool.java    |   2 +-
 .../org/apache/solr/cloud/OverseerTest.java     | 134 +++++++++----------
 .../cloud/SegmentTerminateEarlyTestState.java   |  78 +++++------
 .../org/apache/solr/cloud/SolrXmlInZkTest.java  |   2 +-
 .../apache/solr/cloud/TestSegmentSorting.java   |   2 +-
 .../solr/core/OpenCloseCoreStressTest.java      |   6 +-
 .../solr/schema/SpatialRPTFieldTypeTest.java    |  12 +-
 .../org/apache/solr/search/TestRTGBase.java     |   2 +-
 .../org/apache/solr/search/TestRealTimeGet.java |   4 +-
 .../apache/solr/search/TestReloadDeadlock.java  |   2 +-
 .../apache/solr/search/TestStressLucene.java    |   8 +-
 .../apache/solr/search/TestStressRecovery.java  |   4 +-
 .../apache/solr/search/TestStressReorder.java   |   4 +-
 .../solr/search/TestStressUserVersions.java     |   6 +-
 .../apache/solr/search/TestStressVersions.java  |   4 +-
 .../solr/common/util/TestJavaBinCodec.java      |   4 +-
 42 files changed, 326 insertions(+), 337 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index c8a8deb..e0827e7 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -80,6 +80,9 @@ Other
 
 * LUCENE-7681: MemoryIndex uses new DocValues API (Alan Woodward)
 
+* LUCENE-7753: Make fields static when possible.
+  (Daniel Jelinski via Adrien Grand)
+
 ======================= Lucene 6.6.0 =======================
 
 Other

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/EnwikiContentSourceTest.java
----------------------------------------------------------------------
diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/EnwikiContentSourceTest.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/EnwikiContentSourceTest.java
index 9e76700..d5533c2 100644
--- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/EnwikiContentSourceTest.java
+++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/EnwikiContentSourceTest.java
@@ -61,7 +61,7 @@ public class EnwikiContentSourceTest extends LuceneTestCase {
     });
   }
   
-  private final String PAGE1 = 
+  private static final String PAGE1 =
       "  <page>\r\n" + 
       "    <title>Title1</title>\r\n" + 
       "    <ns>0</ns>\r\n" + 
@@ -80,7 +80,7 @@ public class EnwikiContentSourceTest extends LuceneTestCase {
       "    </revision>\r\n" + 
       "  </page>\r\n";
 
-  private final String PAGE2 = 
+  private static final String PAGE2 =
       "  <page>\r\n" + 
           "    <title>Title2</title>\r\n" + 
           "    <ns>0</ns>\r\n" + 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/lucene/core/src/test/org/apache/lucene/index/TestCustomNorms.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestCustomNorms.java b/lucene/core/src/test/org/apache/lucene/index/TestCustomNorms.java
index f193140..a811192 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestCustomNorms.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestCustomNorms.java
@@ -38,8 +38,8 @@ import org.apache.lucene.util.TestUtil;
  * 
  */
 public class TestCustomNorms extends LuceneTestCase {
-  final String floatTestField = "normsTestFloat";
-  final String exceptionTestField = "normsTestExcp";
+  static final String FLOAT_TEST_FIELD = "normsTestFloat";
+  static final String EXCEPTION_TEST_FIELD = "normsTestExcp";
 
   public void testFloatNorms() throws IOException {
 
@@ -57,11 +57,11 @@ public class TestCustomNorms extends LuceneTestCase {
       Document doc = docs.nextDoc();
       int boost = TestUtil.nextInt(random(), 1, 10);
       String value = IntStream.range(0, boost).mapToObj(k -> Integer.toString(boost)).collect(Collectors.joining(" "));
-      Field f = new TextField(floatTestField, value, Field.Store.YES);
+      Field f = new TextField(FLOAT_TEST_FIELD, value, Field.Store.YES);
 
       doc.add(f);
       writer.addDocument(doc);
-      doc.removeField(floatTestField);
+      doc.removeField(FLOAT_TEST_FIELD);
       if (rarely()) {
         writer.commit();
       }
@@ -69,11 +69,11 @@ public class TestCustomNorms extends LuceneTestCase {
     writer.commit();
     writer.close();
     DirectoryReader open = DirectoryReader.open(dir);
-    NumericDocValues norms = MultiDocValues.getNormValues(open, floatTestField);
+    NumericDocValues norms = MultiDocValues.getNormValues(open, FLOAT_TEST_FIELD);
     assertNotNull(norms);
     for (int i = 0; i < open.maxDoc(); i++) {
       Document document = open.document(i);
-      int expected = Integer.parseInt(document.get(floatTestField).split(" ")[0]);
+      int expected = Integer.parseInt(document.get(FLOAT_TEST_FIELD).split(" ")[0]);
       assertEquals(i, norms.nextDoc());
       assertEquals(expected, norms.longValue());
     }
@@ -87,7 +87,7 @@ public class TestCustomNorms extends LuceneTestCase {
 
     @Override
     public Similarity get(String field) {
-      if (floatTestField.equals(field)) {
+      if (FLOAT_TEST_FIELD.equals(field)) {
         return new FloatEncodingBoostSimilarity();
       } else {
         return delegate;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/lucene/core/src/test/org/apache/lucene/index/TestNorms.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestNorms.java b/lucene/core/src/test/org/apache/lucene/index/TestNorms.java
index 16ce61d..64c0649 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestNorms.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestNorms.java
@@ -48,7 +48,7 @@ import org.apache.lucene.util.TestUtil;
 @SuppressCodecs({ "Memory", "Direct", "SimpleText" })
 @Slow
 public class TestNorms extends LuceneTestCase {
-  final String byteTestField = "normsTestByte";
+  static final String BYTE_TEST_FIELD = "normsTestByte";
 
   static class CustomNormEncodingSimilarity extends TFIDFSimilarity {
 
@@ -115,11 +115,11 @@ public class TestNorms extends LuceneTestCase {
     Directory dir = newFSDirectory(createTempDir("TestNorms.testMaxByteNorms"));
     buildIndex(dir);
     DirectoryReader open = DirectoryReader.open(dir);
-    NumericDocValues normValues = MultiDocValues.getNormValues(open, byteTestField);
+    NumericDocValues normValues = MultiDocValues.getNormValues(open, BYTE_TEST_FIELD);
     assertNotNull(normValues);
     for (int i = 0; i < open.maxDoc(); i++) {
       Document document = open.document(i);
-      int expected = Integer.parseInt(document.get(byteTestField).split(" ")[0]);
+      int expected = Integer.parseInt(document.get(BYTE_TEST_FIELD).split(" ")[0]);
       assertEquals(i, normValues.nextDoc());
       assertEquals(expected, normValues.longValue());
     }
@@ -143,10 +143,10 @@ public class TestNorms extends LuceneTestCase {
       Document doc = docs.nextDoc();
       int boost = TestUtil.nextInt(random, 1, 255);
       String value = IntStream.range(0, boost).mapToObj(k -> Integer.toString(boost)).collect(Collectors.joining(" "));
-      Field f = new TextField(byteTestField, value, Field.Store.YES);
+      Field f = new TextField(BYTE_TEST_FIELD, value, Field.Store.YES);
       doc.add(f);
       writer.addDocument(doc);
-      doc.removeField(byteTestField);
+      doc.removeField(BYTE_TEST_FIELD);
       if (rarely()) {
         writer.commit();
       }
@@ -162,7 +162,7 @@ public class TestNorms extends LuceneTestCase {
 
     @Override
     public Similarity get(String field) {
-      if (byteTestField.equals(field)) {
+      if (BYTE_TEST_FIELD.equals(field)) {
         return new ByteEncodingBoostSimilarity();
       } else {
         return delegate;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java b/lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java
index 4bb2318..5d3c091 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java
@@ -64,7 +64,7 @@ final class BugReproTokenStream extends TokenStream {
   private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
   private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
   private final PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
-  private final int tokenCount = 4;
+  private static final int TOKEN_COUNT = 4;
   private int nextTokenIndex = 0;
   private final String terms[] = new String[]{"six", "six", "drunken", "drunken"};
   private final int starts[] = new int[]{0, 0, 4, 4};
@@ -73,7 +73,7 @@ final class BugReproTokenStream extends TokenStream {
 
   @Override
   public boolean incrementToken() {
-    if (nextTokenIndex < tokenCount) {
+    if (nextTokenIndex < TOKEN_COUNT) {
       termAtt.setEmpty().append(terms[nextTokenIndex]);
       offsetAtt.setOffset(starts[nextTokenIndex], ends[nextTokenIndex]);
       posIncAtt.setPositionIncrement(incs[nextTokenIndex]);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java
index d4c865f..3da69f0 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java
@@ -49,7 +49,7 @@ public class TestAutomatonQuery extends LuceneTestCase {
   private IndexReader reader;
   private IndexSearcher searcher;
 
-  private final String FN = "field";
+  private static final String FN = "field";
   
   @Override
   public void setUp() throws Exception {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java b/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java
index 7b10364..2af3098 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java
@@ -39,7 +39,7 @@ public class TestAutomatonQueryUnicode extends LuceneTestCase {
   private IndexSearcher searcher;
   private Directory directory;
 
-  private final String FN = "field";
+  private static final String FN = "field";
 
   @Override
   public void setUp() throws Exception {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/lucene/core/src/test/org/apache/lucene/search/TestEarlyTerminatingSortingCollector.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestEarlyTerminatingSortingCollector.java b/lucene/core/src/test/org/apache/lucene/search/TestEarlyTerminatingSortingCollector.java
index 6d699e8..217834c 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestEarlyTerminatingSortingCollector.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestEarlyTerminatingSortingCollector.java
@@ -35,14 +35,6 @@ import org.apache.lucene.index.QueryTimeout;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.SerialMergeScheduler;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.MatchAllDocsQuery;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.Sort;
-import org.apache.lucene.search.SortField;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.TopFieldCollector;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
@@ -57,7 +49,7 @@ public class TestEarlyTerminatingSortingCollector extends LuceneTestCase {
   private final Sort sort = new Sort(new SortField("ndv1", SortField.Type.LONG));
   private RandomIndexWriter iw;
   private IndexReader reader;
-  private final int forceMergeMaxSegmentCount = 5;
+  private static final int FORCE_MERGE_MAX_SEGMENT_COUNT = 5;
 
   private Document randomDocument() {
     final Document doc = new Document();
@@ -107,7 +99,7 @@ public class TestEarlyTerminatingSortingCollector extends LuceneTestCase {
       iw.forceMerge(1);
     }
     else if (random().nextBoolean()) {
-      iw.forceMerge(forceMergeMaxSegmentCount);
+      iw.forceMerge(FORCE_MERGE_MAX_SEGMENT_COUNT);
     }
     reader = iw.getReader();
   }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/lucene/core/src/test/org/apache/lucene/search/TestRegexpQuery.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestRegexpQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestRegexpQuery.java
index b1cbe1e..505b26f 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestRegexpQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestRegexpQuery.java
@@ -42,7 +42,7 @@ public class TestRegexpQuery extends LuceneTestCase {
   private IndexSearcher searcher;
   private IndexReader reader;
   private Directory directory;
-  private final String FN = "field";
+  private static final String FN = "field";
   
   @Override
   public void setUp() throws Exception {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/lucene/core/src/test/org/apache/lucene/search/TestSearchWithThreads.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSearchWithThreads.java b/lucene/core/src/test/org/apache/lucene/search/TestSearchWithThreads.java
index 95c0b8e..0008dc7 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSearchWithThreads.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSearchWithThreads.java
@@ -32,7 +32,7 @@ import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
 @SuppressCodecs({ "SimpleText", "Memory", "Direct" })
 public class TestSearchWithThreads extends LuceneTestCase {
   int NUM_DOCS;
-  final int NUM_SEARCH_THREADS = 5;
+  static final int NUM_SEARCH_THREADS = 5;
   int RUN_TIME_MSEC;
   
   @Override

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/lucene/core/src/test/org/apache/lucene/store/TestRAMDirectory.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/test/org/apache/lucene/store/TestRAMDirectory.java b/lucene/core/src/test/org/apache/lucene/store/TestRAMDirectory.java
index d4e8bfc..f0f2d46 100644
--- a/lucene/core/src/test/org/apache/lucene/store/TestRAMDirectory.java
+++ b/lucene/core/src/test/org/apache/lucene/store/TestRAMDirectory.java
@@ -49,7 +49,7 @@ public class TestRAMDirectory extends BaseDirectoryTestCase {
   }
   
   // add enough document so that the index will be larger than RAMDirectory.READ_BUFFER_SIZE
-  private final int docsToAdd = 500;
+  private static final int DOCS_TO_ADD = 500;
 
   private Path buildIndex() throws IOException {
     Path path = createTempDir("buildIndex");
@@ -59,12 +59,12 @@ public class TestRAMDirectory extends BaseDirectoryTestCase {
         new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE));
     // add some documents
     Document doc = null;
-    for (int i = 0; i < docsToAdd; i++) {
+    for (int i = 0; i < DOCS_TO_ADD; i++) {
       doc = new Document();
       doc.add(newStringField("content", English.intToEnglish(i).trim(), Field.Store.YES));
       writer.addDocument(doc);
     }
-    assertEquals(docsToAdd, writer.maxDoc());
+    assertEquals(DOCS_TO_ADD, writer.maxDoc());
     writer.close();
     dir.close();
 
@@ -100,13 +100,13 @@ public class TestRAMDirectory extends BaseDirectoryTestCase {
     
     // open reader to test document count
     IndexReader reader = DirectoryReader.open(ramDir);
-    assertEquals(docsToAdd, reader.numDocs());
+    assertEquals(DOCS_TO_ADD, reader.numDocs());
     
     // open search zo check if all doc's are there
     IndexSearcher searcher = newSearcher(reader);
     
     // search for all documents
-    for (int i = 0; i < docsToAdd; i++) {
+    for (int i = 0; i < DOCS_TO_ADD; i++) {
       Document doc = searcher.doc(i);
       assertTrue(doc.getField("content") != null);
     }
@@ -115,8 +115,8 @@ public class TestRAMDirectory extends BaseDirectoryTestCase {
     reader.close();
   }
   
-  private final int numThreads = 10;
-  private final int docsPerThread = 40;
+  private static final int NUM_THREADS = 10;
+  private static final int DOCS_PER_THREAD = 40;
   
   public void testRAMDirectorySize() throws IOException, InterruptedException {
 
@@ -132,15 +132,15 @@ public class TestRAMDirectory extends BaseDirectoryTestCase {
     
     assertEquals(ramDir.sizeInBytes(), ramDir.getRecomputedSizeInBytes());
     
-    Thread[] threads = new Thread[numThreads];
-    for (int i=0; i<numThreads; i++) {
+    Thread[] threads = new Thread[NUM_THREADS];
+    for (int i = 0; i< NUM_THREADS; i++) {
       final int num = i;
       threads[i] = new Thread(){
         @Override
         public void run() {
-          for (int j=1; j<docsPerThread; j++) {
+          for (int j = 1; j< DOCS_PER_THREAD; j++) {
             Document doc = new Document();
-            doc.add(newStringField("sizeContent", English.intToEnglish(num*docsPerThread+j).trim(), Field.Store.YES));
+            doc.add(newStringField("sizeContent", English.intToEnglish(num* DOCS_PER_THREAD +j).trim(), Field.Store.YES));
             try {
               writer.addDocument(doc);
             } catch (IOException e) {
@@ -150,10 +150,10 @@ public class TestRAMDirectory extends BaseDirectoryTestCase {
         }
       };
     }
-    for (int i=0; i<numThreads; i++) {
+    for (int i = 0; i< NUM_THREADS; i++) {
       threads[i].start();
     }
-    for (int i=0; i<numThreads; i++) {
+    for (int i = 0; i< NUM_THREADS; i++) {
       threads[i].join();
     }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/lucene/grouping/src/test/org/apache/lucene/search/grouping/DistinctValuesCollectorTest.java
----------------------------------------------------------------------
diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/DistinctValuesCollectorTest.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/DistinctValuesCollectorTest.java
index ba43ca8..b5d67cf 100644
--- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/DistinctValuesCollectorTest.java
+++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/DistinctValuesCollectorTest.java
@@ -59,8 +59,8 @@ public class DistinctValuesCollectorTest extends AbstractGroupingTestCase {
 
   private final static NullComparator nullComparator = new NullComparator();
   
-  private final String groupField = "author";
-  private final String countField = "publisher";
+  private static final String GROUP_FIELD = "author";
+  private static final String COUNT_FIELD = "publisher";
 
   public void testSimple() throws Exception {
     Random random = random();
@@ -70,24 +70,24 @@ public class DistinctValuesCollectorTest extends AbstractGroupingTestCase {
         dir,
         newIndexWriterConfig(new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
     Document doc = new Document();
-    addField(doc, groupField, "1");
-    addField(doc, countField, "1");
+    addField(doc, GROUP_FIELD, "1");
+    addField(doc, COUNT_FIELD, "1");
     doc.add(new TextField("content", "random text", Field.Store.NO));
     doc.add(new StringField("id", "1", Field.Store.NO));
     w.addDocument(doc);
 
     // 1
     doc = new Document();
-    addField(doc, groupField, "1");
-    addField(doc, countField, "1");
+    addField(doc, GROUP_FIELD, "1");
+    addField(doc, COUNT_FIELD, "1");
     doc.add(new TextField("content", "some more random text blob", Field.Store.NO));
     doc.add(new StringField("id", "2", Field.Store.NO));
     w.addDocument(doc);
 
     // 2
     doc = new Document();
-    addField(doc, groupField, "1");
-    addField(doc, countField, "2");
+    addField(doc, GROUP_FIELD, "1");
+    addField(doc, COUNT_FIELD, "2");
     doc.add(new TextField("content", "some more random textual data", Field.Store.NO));
     doc.add(new StringField("id", "3", Field.Store.NO));
     w.addDocument(doc);
@@ -95,23 +95,23 @@ public class DistinctValuesCollectorTest extends AbstractGroupingTestCase {
 
     // 3 -- no count field
     doc = new Document();
-    addField(doc, groupField, "2");
+    addField(doc, GROUP_FIELD, "2");
     doc.add(new TextField("content", "some random text", Field.Store.NO));
     doc.add(new StringField("id", "4", Field.Store.NO));
     w.addDocument(doc);
 
     // 4
     doc = new Document();
-    addField(doc, groupField, "3");
-    addField(doc, countField, "1");
+    addField(doc, GROUP_FIELD, "3");
+    addField(doc, COUNT_FIELD, "1");
     doc.add(new TextField("content", "some more random text", Field.Store.NO));
     doc.add(new StringField("id", "5", Field.Store.NO));
     w.addDocument(doc);
 
     // 5
     doc = new Document();
-    addField(doc, groupField, "3");
-    addField(doc, countField, "1");
+    addField(doc, GROUP_FIELD, "3");
+    addField(doc, COUNT_FIELD, "1");
     doc.add(new TextField("content", "random blob", Field.Store.NO));
     doc.add(new StringField("id", "6", Field.Store.NO));
     w.addDocument(doc);
@@ -119,7 +119,7 @@ public class DistinctValuesCollectorTest extends AbstractGroupingTestCase {
     // 6 -- no author field
     doc = new Document();
     doc.add(new TextField("content", "random word stuck in alot of other text", Field.Store.YES));
-    addField(doc, countField, "1");
+    addField(doc, COUNT_FIELD, "1");
     doc.add(new StringField("id", "6", Field.Store.NO));
     w.addDocument(doc);
 
@@ -145,10 +145,10 @@ public class DistinctValuesCollectorTest extends AbstractGroupingTestCase {
     };
 
     // === Search for content:random
-    FirstPassGroupingCollector<Comparable<Object>> firstCollector = createRandomFirstPassCollector(new Sort(), groupField, 10);
+    FirstPassGroupingCollector<Comparable<Object>> firstCollector = createRandomFirstPassCollector(new Sort(), GROUP_FIELD, 10);
     indexSearcher.search(new TermQuery(new Term("content", "random")), firstCollector);
     DistinctValuesCollector<Comparable<Object>> distinctValuesCollector
-        = createDistinctCountCollector(firstCollector, groupField, countField);
+        = createDistinctCountCollector(firstCollector, GROUP_FIELD, COUNT_FIELD);
     indexSearcher.search(new TermQuery(new Term("content", "random")), distinctValuesCollector);
 
     List<DistinctValuesCollector.GroupCount<Comparable<Object>>> gcs = distinctValuesCollector.getGroups();
@@ -178,9 +178,9 @@ public class DistinctValuesCollectorTest extends AbstractGroupingTestCase {
     compare("1", countValues.get(0));
 
     // === Search for content:some
-    firstCollector = createRandomFirstPassCollector(new Sort(), groupField, 10);
+    firstCollector = createRandomFirstPassCollector(new Sort(), GROUP_FIELD, 10);
     indexSearcher.search(new TermQuery(new Term("content", "some")), firstCollector);
-    distinctValuesCollector = createDistinctCountCollector(firstCollector, groupField, countField);
+    distinctValuesCollector = createDistinctCountCollector(firstCollector, GROUP_FIELD, COUNT_FIELD);
     indexSearcher.search(new TermQuery(new Term("content", "some")), distinctValuesCollector);
 
     gcs = distinctValuesCollector.getGroups();
@@ -205,9 +205,9 @@ public class DistinctValuesCollectorTest extends AbstractGroupingTestCase {
     compare("1", countValues.get(0));
 
      // === Search for content:blob
-    firstCollector = createRandomFirstPassCollector(new Sort(), groupField, 10);
+    firstCollector = createRandomFirstPassCollector(new Sort(), GROUP_FIELD, 10);
     indexSearcher.search(new TermQuery(new Term("content", "blob")), firstCollector);
-    distinctValuesCollector = createDistinctCountCollector(firstCollector, groupField, countField);
+    distinctValuesCollector = createDistinctCountCollector(firstCollector, GROUP_FIELD, COUNT_FIELD);
     indexSearcher.search(new TermQuery(new Term("content", "blob")), distinctValuesCollector);
 
     gcs = distinctValuesCollector.getGroups();
@@ -242,10 +242,10 @@ public class DistinctValuesCollectorTest extends AbstractGroupingTestCase {
 
         List<DistinctValuesCollector.GroupCount<Comparable<?>>> expectedResult = createExpectedResult(context, term, groupSort, topN);
 
-        FirstPassGroupingCollector<Comparable<?>> firstCollector = createRandomFirstPassCollector(groupSort, groupField, topN);
+        FirstPassGroupingCollector<Comparable<?>> firstCollector = createRandomFirstPassCollector(groupSort, GROUP_FIELD, topN);
         searcher.search(new TermQuery(new Term("content", term)), firstCollector);
         DistinctValuesCollector<Comparable<?>> distinctValuesCollector
-            = createDistinctCountCollector(firstCollector, groupField, countField);
+            = createDistinctCountCollector(firstCollector, GROUP_FIELD, COUNT_FIELD);
         searcher.search(new TermQuery(new Term("content", term)), distinctValuesCollector);
         @SuppressWarnings("unchecked")
         List<DistinctValuesCollector.GroupCount<Comparable<?>>> actualResult = distinctValuesCollector.getGroups();
@@ -440,10 +440,10 @@ public class DistinctValuesCollectorTest extends AbstractGroupingTestCase {
       doc.add(new StringField("id", String.format(Locale.ROOT, "%09d", i), Field.Store.YES));
       doc.add(new SortedDocValuesField("id", new BytesRef(String.format(Locale.ROOT, "%09d", i))));
       if (groupValue != null) {
-        addField(doc, groupField, groupValue);
+        addField(doc, GROUP_FIELD, groupValue);
       }
       if (countValue != null) {
-        addField(doc, countField, countValue);
+        addField(doc, COUNT_FIELD, countValue);
       }
       doc.add(new TextField("content", content, Field.Store.YES));
       w.addDocument(doc);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/LengthGoalBreakIteratorTest.java
----------------------------------------------------------------------
diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/LengthGoalBreakIteratorTest.java b/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/LengthGoalBreakIteratorTest.java
index 42d2bf6..4dd30e2 100644
--- a/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/LengthGoalBreakIteratorTest.java
+++ b/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/LengthGoalBreakIteratorTest.java
@@ -39,7 +39,7 @@ public class LengthGoalBreakIteratorTest extends LuceneTestCase {
   // We do a '.' BreakIterator and test varying the length goal.
   //                      0         1
   //                      01234567890123456789
-  final String content = "Aa bb. Cc dd. Ee ff";
+  static final String CONTENT = "Aa bb. Cc dd. Ee ff";
 
   public void testTargetLen() throws IOException {
     // "goal" means target length goal to find closest break
@@ -47,22 +47,22 @@ public class LengthGoalBreakIteratorTest extends LuceneTestCase {
     // at first word:
     Query query = query("aa");
     assertEquals("almost two sent",
-        "<b>Aa</b> bb.", highlightClosestToLen(content, query, 9));
+        "<b>Aa</b> bb.", highlightClosestToLen(CONTENT, query, 9));
     assertEquals( "barely two sent",
-        "<b>Aa</b> bb. Cc dd.", highlightClosestToLen(content, query, 10));
+        "<b>Aa</b> bb. Cc dd.", highlightClosestToLen(CONTENT, query, 10));
     assertEquals("long goal",
-        "<b>Aa</b> bb. Cc dd. Ee ff", highlightClosestToLen(content, query, 17 + random().nextInt(20)));
+        "<b>Aa</b> bb. Cc dd. Ee ff", highlightClosestToLen(CONTENT, query, 17 + random().nextInt(20)));
 
     // at some word not at start of passage
     query = query("dd");
     assertEquals("short goal",
-        " Cc <b>dd</b>.", highlightClosestToLen(content, query, random().nextInt(5)));
+        " Cc <b>dd</b>.", highlightClosestToLen(CONTENT, query, random().nextInt(5)));
     assertEquals("almost two sent",
-        " Cc <b>dd</b>.", highlightClosestToLen(content, query, 10));
+        " Cc <b>dd</b>.", highlightClosestToLen(CONTENT, query, 10));
     assertEquals("barely two sent",
-        " Cc <b>dd</b>. Ee ff", highlightClosestToLen(content, query, 11));
+        " Cc <b>dd</b>. Ee ff", highlightClosestToLen(CONTENT, query, 11));
     assertEquals("long goal",
-        " Cc <b>dd</b>. Ee ff", highlightClosestToLen(content, query, 12 + random().nextInt(20)));
+        " Cc <b>dd</b>. Ee ff", highlightClosestToLen(CONTENT, query, 12 + random().nextInt(20)));
   }
 
   public void testMinLen() throws IOException {
@@ -70,19 +70,19 @@ public class LengthGoalBreakIteratorTest extends LuceneTestCase {
 
     Query query = query("dd");
     assertEquals("almost two sent",
-        " Cc <b>dd</b>.", highlightMinLen(content, query, 6));
+        " Cc <b>dd</b>.", highlightMinLen(CONTENT, query, 6));
     assertEquals("barely two sent",
-        " Cc <b>dd</b>. Ee ff", highlightMinLen(content, query, 7));
+        " Cc <b>dd</b>. Ee ff", highlightMinLen(CONTENT, query, 7));
   }
 
   public void testDefaultSummaryTargetLen() throws IOException {
     Query query = query("zz");
     assertEquals("Aa bb.",
-        highlightClosestToLen(content, query, random().nextInt(10))); // < 10
+        highlightClosestToLen(CONTENT, query, random().nextInt(10))); // < 10
     assertEquals("Aa bb. Cc dd.",
-        highlightClosestToLen(content, query, 10 + 6)); // cusp of adding 3rd sentence
+        highlightClosestToLen(CONTENT, query, 10 + 6)); // cusp of adding 3rd sentence
     assertEquals("Aa bb. Cc dd. Ee ff",
-        highlightClosestToLen(content, query, 17 + random().nextInt(20))); // >= 14
+        highlightClosestToLen(CONTENT, query, 17 + random().nextInt(20))); // >= 14
   }
 
   private Query query(String qStr) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java
----------------------------------------------------------------------
diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java b/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java
index be75e64..2424219 100644
--- a/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java
+++ b/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java
@@ -47,9 +47,9 @@ import org.apache.lucene.util.LuceneTestCase;
 
 public abstract class AbstractTestCase extends LuceneTestCase {
 
-  protected final String F = "f";
-  protected final String F1 = "f1";
-  protected final String F2 = "f2";
+  protected static final String F = "f";
+  protected static final String F1 = "f1";
+  protected static final String F2 = "f2";
   protected Directory dir;
   protected Analyzer analyzerW;
   protected Analyzer analyzerB;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.java
----------------------------------------------------------------------
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.java
index f0f4b34..e413584 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.java
@@ -42,18 +42,15 @@ import org.apache.lucene.queryparser.surround.query.SrndTruncQuery;
  */
 
 public class QueryParser implements QueryParserConstants {
-  final int minimumPrefixLength = 3;
-  final int minimumCharsInTrunc = 3;
-  final String truncationErrorMessage = "Too unrestrictive truncation: ";
-  final String boostErrorMessage = "Cannot handle boost value: ";
+  static final int MINIMUM_PREFIX_LENGTH = 3;
+  static final int MINIMUM_CHARS_IN_TRUNC = 3;
+  static final String TRUNCATION_ERROR_MESSAGE = "Too unrestrictive truncation: ";
+  static final String BOOST_ERROR_MESSAGE = "Cannot handle boost value: ";
 
   /* CHECKME: These should be the same as for the tokenizer. How? */
-  final char truncator = '*';
-  final char anyChar = '?';
-  final char quote = '"';
-  final char fieldOperator = ':';
-  final char comma = ','; /* prefix list separator */
-  final char carat = '^'; /* weight operator */
+  static final char TRUNCATOR = '*';
+  static final char ANY_CHAR = '?';
+  static final char FIELD_OPERATOR = ':';
 
   static public SrndQuery parse(String query) throws ParseException {
     QueryParser parser = new QueryParser();
@@ -78,7 +75,7 @@ public class QueryParser implements QueryParserConstants {
     /* FIXME: check acceptable subquery: at least one subquery should not be
      * a fields query.
      */
-    return new FieldsQuery(q, fieldNames, fieldOperator);
+    return new FieldsQuery(q, fieldNames, FIELD_OPERATOR);
   }
 
   protected SrndQuery getOrQuery(List<SrndQuery> queries, boolean infix, Token orToken) {
@@ -128,12 +125,12 @@ public class QueryParser implements QueryParserConstants {
   }
 
   protected boolean allowedSuffix(String suffixed) {
-    return (suffixed.length() - 1) >= minimumPrefixLength;
+    return (suffixed.length() - 1) >= MINIMUM_PREFIX_LENGTH;
   }
 
   protected SrndQuery getPrefixQuery(
       String prefix, boolean quoted) {
-    return new SrndPrefixQuery(prefix, quoted, truncator);
+    return new SrndPrefixQuery(prefix, quoted, TRUNCATOR);
   }
 
   protected boolean allowedTruncation(String truncated) {
@@ -141,15 +138,15 @@ public class QueryParser implements QueryParserConstants {
     int nrNormalChars = 0;
     for (int i = 0; i < truncated.length(); i++) {
       char c = truncated.charAt(i);
-      if ((c != truncator) && (c != anyChar)) {
+      if ((c != TRUNCATOR) && (c != ANY_CHAR)) {
         nrNormalChars++;
       }
     }
-    return nrNormalChars >= minimumCharsInTrunc;
+    return nrNormalChars >= MINIMUM_CHARS_IN_TRUNC;
   }
 
   protected SrndQuery getTruncQuery(String truncated) {
-    return new SrndTruncQuery(truncated, truncator, anyChar);
+    return new SrndTruncQuery(truncated, TRUNCATOR, ANY_CHAR);
   }
 
   final public SrndQuery TopSrndQuery() throws ParseException {
@@ -437,7 +434,7 @@ public class QueryParser implements QueryParserConstants {
       term = jj_consume_token(SUFFIXTERM);
                         /* ending in * */
       if (! allowedSuffix(term.image)) {
-        {if (true) throw new ParseException(truncationErrorMessage + term.image);}
+        {if (true) throw new ParseException(TRUNCATION_ERROR_MESSAGE + term.image);}
       }
       {if (true) return getPrefixQuery(term.image.substring(0, term.image.length()-1), false /* not quoted */);}
       break;
@@ -445,15 +442,15 @@ public class QueryParser implements QueryParserConstants {
       term = jj_consume_token(TRUNCTERM);
                        /* with at least one * or ? */
       if (! allowedTruncation(term.image)) {
-        {if (true) throw new ParseException(truncationErrorMessage + term.image);}
+        {if (true) throw new ParseException(TRUNCATION_ERROR_MESSAGE + term.image);}
       }
       {if (true) return getTruncQuery(term.image);}
       break;
     case TRUNCQUOTED:
       term = jj_consume_token(TRUNCQUOTED);
                          /* eg. "9b-b,m"* */
-      if ((term.image.length() - 3) < minimumPrefixLength) {
-        {if (true) throw new ParseException(truncationErrorMessage + term.image);}
+      if ((term.image.length() - 3) < MINIMUM_PREFIX_LENGTH) {
+        {if (true) throw new ParseException(TRUNCATION_ERROR_MESSAGE + term.image);}
       }
       {if (true) return getPrefixQuery(term.image.substring(1, term.image.length()-2), true /* quoted */);}
       break;
@@ -483,10 +480,10 @@ public class QueryParser implements QueryParserConstants {
       try {
         f = Float.parseFloat(weight.image);
       } catch (Exception floatExc) {
-        {if (true) throw new ParseException(boostErrorMessage + weight.image + " (" + floatExc + ")");}
+        {if (true) throw new ParseException(BOOST_ERROR_MESSAGE + weight.image + " (" + floatExc + ")");}
       }
       if (f <= 0.0) {
-        {if (true) throw new ParseException(boostErrorMessage + weight.image);}
+        {if (true) throw new ParseException(BOOST_ERROR_MESSAGE + weight.image);}
       }
       q.setWeight(f * q.getWeight()); /* left associative, fwiw */
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.jj
----------------------------------------------------------------------
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.jj b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.jj
index 857cca1..787ed16 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.jj
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.jj
@@ -73,19 +73,16 @@ import org.apache.lucene.queryparser.surround.query.SrndTruncQuery;
  */
 
 public class QueryParser {
-  final int minimumPrefixLength = 3;
-  final int minimumCharsInTrunc = 3;
-  final String truncationErrorMessage = "Too unrestrictive truncation: ";
-  final String boostErrorMessage = "Cannot handle boost value: ";
- 
+  static final int MINIMUM_PREFIX_LENGTH = 3;
+  static final int MINIMUM_CHARS_IN_TRUNC = 3;
+  static final String TRUNCATION_ERROR_MESSAGE = "Too unrestrictive truncation: ";
+  static final String BOOST_ERROR_MESSAGE = "Cannot handle boost value: ";
+
   /* CHECKME: These should be the same as for the tokenizer. How? */
-  final char truncator = '*';
-  final char anyChar = '?';
-  final char quote = '"';
-  final char fieldOperator = ':';
-  final char comma = ','; /* prefix list separator */
-  final char carat = '^'; /* weight operator */
- 
+  static final char TRUNCATOR = '*';
+  static final char ANY_CHAR = '?';
+  static final char FIELD_OPERATOR = ':';
+
   static public SrndQuery parse(String query) throws ParseException {
     QueryParser parser = new QueryParser();
     return parser.parse2(query);
@@ -109,7 +106,7 @@ public class QueryParser {
     /* FIXME: check acceptable subquery: at least one subquery should not be
      * a fields query.
      */
-    return new FieldsQuery(q, fieldNames, fieldOperator);
+    return new FieldsQuery(q, fieldNames, FIELD_OPERATOR);
   }
   
   protected SrndQuery getOrQuery(List<SrndQuery> queries, boolean infix, Token orToken) {
@@ -159,12 +156,12 @@ public class QueryParser {
   }
 
   protected boolean allowedSuffix(String suffixed) {
-    return (suffixed.length() - 1) >= minimumPrefixLength;
+    return (suffixed.length() - 1) >= MINIMUM_PREFIX_LENGTH;
   }
 
   protected SrndQuery getPrefixQuery(
       String prefix, boolean quoted) {
-    return new SrndPrefixQuery(prefix, quoted, truncator);
+    return new SrndPrefixQuery(prefix, quoted, TRUNCATOR);
   }
   
   protected boolean allowedTruncation(String truncated) {
@@ -172,15 +169,15 @@ public class QueryParser {
     int nrNormalChars = 0;
     for (int i = 0; i < truncated.length(); i++) {
       char c = truncated.charAt(i);
-      if ((c != truncator) && (c != anyChar)) {
+      if ((c != TRUNCATOR) && (c != ANY_CHAR)) {
         nrNormalChars++;
       }
     }
-    return nrNormalChars >= minimumCharsInTrunc;
+    return nrNormalChars >= MINIMUM_CHARS_IN_TRUNC;
   }
 
   protected SrndQuery getTruncQuery(String truncated) {
-    return new SrndTruncQuery(truncated, truncator, anyChar);
+    return new SrndTruncQuery(truncated, TRUNCATOR, ANY_CHAR);
   }
 }
 
@@ -432,21 +429,21 @@ SrndQuery SimpleTerm() : {
       
   | term=<SUFFIXTERM> { /* ending in * */
       if (! allowedSuffix(term.image)) {
-        throw new ParseException(truncationErrorMessage + term.image);
+        throw new ParseException(TRUNCATION_ERROR_MESSAGE + term.image);
       }
       return getPrefixQuery(term.image.substring(0, term.image.length()-1), false /* not quoted */);
     }
       
   | term=<TRUNCTERM> { /* with at least one * or ? */
       if (! allowedTruncation(term.image)) {
-        throw new ParseException(truncationErrorMessage + term.image);
+        throw new ParseException(TRUNCATION_ERROR_MESSAGE + term.image);
       }
       return getTruncQuery(term.image);
     }
       
   | term=<TRUNCQUOTED> { /* eg. "9b-b,m"* */
-      if ((term.image.length() - 3) < minimumPrefixLength) {
-        throw new ParseException(truncationErrorMessage + term.image);
+      if ((term.image.length() - 3) < MINIMUM_PREFIX_LENGTH) {
+        throw new ParseException(TRUNCATION_ERROR_MESSAGE + term.image);
       }
       return getPrefixQuery(term.image.substring(1, term.image.length()-2), true /* quoted */);
     }
@@ -462,10 +459,10 @@ void OptionalWeights(SrndQuery q) : {
       try {
         f = Float.parseFloat(weight.image);
       } catch (Exception floatExc) {
-        throw new ParseException(boostErrorMessage + weight.image + " (" + floatExc + ")");
+        throw new ParseException(BOOST_ERROR_MESSAGE + weight.image + " (" + floatExc + ")");
       }
       if (f <= 0.0) {
-        throw new ParseException(boostErrorMessage + weight.image);
+        throw new ParseException(BOOST_ERROR_MESSAGE + weight.image);
       }      
       q.setWeight(f * q.getWeight()); /* left associative, fwiw */
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/FieldsQuery.java
----------------------------------------------------------------------
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/FieldsQuery.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/FieldsQuery.java
index 23bb095..4d933b7 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/FieldsQuery.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/FieldsQuery.java
@@ -28,7 +28,7 @@ public class FieldsQuery extends SrndQuery { /* mostly untested */
   private SrndQuery q;
   private List<String> fieldNames;
   private final char fieldOp;
-  private final String OrOperatorName = "OR"; /* for expanded queries, not normally visible */
+  private static final String OR_OPERATOR_NAME = "OR"; /* for expanded queries, not normally visible */
   
   public FieldsQuery(SrndQuery q, List<String> fieldNames, char fieldOp) {
     this.q = q;
@@ -61,7 +61,7 @@ public class FieldsQuery extends SrndQuery { /* mostly untested */
       }
       OrQuery oq = new OrQuery(queries,
                               true /* infix OR for field names */,
-                              OrOperatorName);
+                              OR_OPERATOR_NAME);
       // System.out.println(getClass().toString() + ", fields expanded: " + oq.toString()); /* needs testing */
       return oq.makeLuceneQueryField(null, qf);
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoBBoxTest.java
----------------------------------------------------------------------
diff --git a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoBBoxTest.java b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoBBoxTest.java
index 7530c74..401d851 100755
--- a/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoBBoxTest.java
+++ b/lucene/spatial3d/src/test/org/apache/lucene/spatial3d/geom/GeoBBoxTest.java
@@ -27,7 +27,7 @@ import static org.junit.Assert.assertTrue;
 
 public class GeoBBoxTest {
 
-  protected final double DEGREES_TO_RADIANS = Math.PI / 180.0;
+  protected static final double DEGREES_TO_RADIANS = Math.PI / 180.0;
 
   @Test
   public void testBBoxDegenerate() {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java
----------------------------------------------------------------------
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java
index 78bf299..6f57a2e 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java
@@ -60,7 +60,7 @@ import org.apache.lucene.util.TestUtil;
 
 public final class MockRandomPostingsFormat extends PostingsFormat {
   private final Random seedRandom;
-  private final String SEED_EXT = "sd";
+  private static final String SEED_EXT = "sd";
   
   public MockRandomPostingsFormat() {
     // This ctor should *only* be used at read-time: get NPE if you use it!

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/solr/contrib/analytics/src/java/org/apache/solr/analytics/request/AnalyticsContentHandler.java
----------------------------------------------------------------------
diff --git a/solr/contrib/analytics/src/java/org/apache/solr/analytics/request/AnalyticsContentHandler.java b/solr/contrib/analytics/src/java/org/apache/solr/analytics/request/AnalyticsContentHandler.java
index 31b0576..b93a59e 100644
--- a/solr/contrib/analytics/src/java/org/apache/solr/analytics/request/AnalyticsContentHandler.java
+++ b/solr/contrib/analytics/src/java/org/apache/solr/analytics/request/AnalyticsContentHandler.java
@@ -35,34 +35,34 @@ import org.xml.sax.SAXException;
  */
 public class AnalyticsContentHandler implements ContentHandler {
   // XML Element/Attribute Name Constants
-  public final String ANALYTICS_REQUEST_ENVELOPE="analyticsRequestEnvelope";
+  public static final String ANALYTICS_REQUEST_ENVELOPE="analyticsRequestEnvelope";
   
-  public final String ANALYTICS_REQUEST="analyticsRequest";
-  public final String NAME="name";
+  public static final String ANALYTICS_REQUEST="analyticsRequest";
+  public static final String NAME="name";
   
-  public final String STATISTIC="statistic";
-  public final String EXPRESSION="expression";
+  public static final String STATISTIC="statistic";
+  public static final String EXPRESSION="expression";
   
-  public final String FIELD_FACET="fieldFacet";
-  public final String FIELD="field";
-  public final String SHOW_MISSING="showMissing";
-  public final String LIMIT="limit";
-  public final String MIN_COUNT="minCount";
+  public static final String FIELD_FACET="fieldFacet";
+  public static final String FIELD="field";
+  public static final String SHOW_MISSING="showMissing";
+  public static final String LIMIT="limit";
+  public static final String MIN_COUNT="minCount";
   
-  public final String SORT_SPECIFICATION="sortSpecification";
-  public final String STAT_NAME="statName";
-  public final String DIRECTION="direction";
+  public static final String SORT_SPECIFICATION="sortSpecification";
+  public static final String STAT_NAME="statName";
+  public static final String DIRECTION="direction";
   
-  public final String RANGE_FACET="rangeFacet";
-  public final String START="start";
-  public final String END="end";
-  public final String GAP="gap";
-  public final String INCLUDE_BOUNDARY="includeBoundary";
-  public final String OTHER_RANGE="otherRange";
-  public final String HARD_END="hardend";
+  public static final String RANGE_FACET="rangeFacet";
+  public static final String START="start";
+  public static final String END="end";
+  public static final String GAP="gap";
+  public static final String INCLUDE_BOUNDARY="includeBoundary";
+  public static final String OTHER_RANGE="otherRange";
+  public static final String HARD_END="hardend";
   
-  public final String QUERY_FACET="queryFacet";
-  public final String QUERY="query";
+  public static final String QUERY_FACET="queryFacet";
+  public static final String QUERY="query";
   
   // Default Values
   public static final int DEFAULT_FACET_LIMIT = -1;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestDocBuilder2.java
----------------------------------------------------------------------
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestDocBuilder2.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestDocBuilder2.java
index 32a0d4a..27865cd 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestDocBuilder2.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestDocBuilder2.java
@@ -66,7 +66,7 @@ public class TestDocBuilder2 extends AbstractDataImportHandlerTestCase {
     rows.add(createMap("id", "1", "desC", "one"));
     MockDataSource.setIterator("select * from x", rows.iterator());
 
-    runFullImport(dataConfigWithCaseInsensitiveFields);
+    runFullImport(DATA_CONFIG_WITH_CASE_INSENSITIVE_FIELDS);
 
     assertQ(req("id:1"), "//*[@numFound='1']");
     assertTrue("Start event listener was not called", StartEventListener.executed);
@@ -81,7 +81,7 @@ public class TestDocBuilder2 extends AbstractDataImportHandlerTestCase {
     rows.add(createMap("id", "1", "FORCE_ERROR", "true"));
     MockDataSource.setIterator("select * from x", rows.iterator());
 
-    runFullImport(dataConfigWithErrorHandler);
+    runFullImport(DATA_CONFIG_WITH_ERROR_HANDLER);
 
     assertTrue("Error event listener was not called", ErrorEventListener.executed);
     assertTrue(ErrorEventListener.lastException.getMessage().contains("ForcedException"));
@@ -94,7 +94,7 @@ public class TestDocBuilder2 extends AbstractDataImportHandlerTestCase {
     rows.add(createMap("id", "1", "desc", "one"));
     MockDataSource.setIterator("select * from x", rows.iterator());
 
-    runFullImport(dataConfigWithDynamicTransformer);
+    runFullImport(DATA_CONFIG_WITH_DYNAMIC_TRANSFORMER);
 
     assertQ(req("id:1"), "//*[@numFound='1']");
     assertQ(req("dynamic_s:test"), "//*[@numFound='1']");
@@ -110,7 +110,7 @@ public class TestDocBuilder2 extends AbstractDataImportHandlerTestCase {
     LocalSolrQueryRequest request = lrf.makeRequest("command", "full-import",
             "debug", "on", "clean", "true", "commit", "true",
             "category", "search",
-            "dataConfig", requestParamAsVariable);
+            "dataConfig", REQUEST_PARAM_AS_VARIABLE);
     h.query("/dataimport", request);
     assertQ(req("desc:ApacheSolr"), "//*[@numFound='1']");
   }
@@ -124,7 +124,7 @@ public class TestDocBuilder2 extends AbstractDataImportHandlerTestCase {
 
     LocalSolrQueryRequest request = lrf.makeRequest("command", "full-import",
         "debug", "on", "clean", "true", "commit", "true",
-        "dataConfig", dataConfigWithDynamicFieldNames);
+        "dataConfig", DATA_CONFIG_WITH_DYNAMIC_FIELD_NAMES);
     h.query("/dataimport", request);
     assertQ(req("id:101"), "//*[@numFound='1']", "//*[@name='101_s']");
   }
@@ -139,7 +139,7 @@ public class TestDocBuilder2 extends AbstractDataImportHandlerTestCase {
     LocalSolrQueryRequest request = lrf.makeRequest("command", "full-import",
             "debug", "on", "clean", "true", "commit", "true",
             "mypk", "id", "text", "desc",
-            "dataConfig", dataConfigWithTemplatizedFieldNames);
+            "dataConfig", DATA_CONFIG_WITH_TEMPLATIZED_FIELD_NAMES);
     h.query("/dataimport", request);
     assertQ(req("id:101"), "//*[@numFound='1']");
   }
@@ -162,7 +162,7 @@ public class TestDocBuilder2 extends AbstractDataImportHandlerTestCase {
     rows.add(createMap("id", "2", "desc", "two", DocBuilder.SKIP_DOC, "true"));
     MockDataSource.setIterator("select * from x", rows.iterator());
 
-    runFullImport(dataConfigWithDynamicTransformer);
+    runFullImport(DATA_CONFIG_WITH_DYNAMIC_TRANSFORMER);
 
     assertQ(req("id:1"), "//*[@numFound='1']");
     assertQ(req("id:2"), "//*[@numFound='0']");
@@ -176,7 +176,7 @@ public class TestDocBuilder2 extends AbstractDataImportHandlerTestCase {
     rows.add(createMap("id", "2", "desc", "two", DocBuilder.SKIP_ROW, "true"));
     MockDataSource.setIterator("select * from x", rows.iterator());
 
-    runFullImport(dataConfigWithDynamicTransformer);
+    runFullImport(DATA_CONFIG_WITH_DYNAMIC_TRANSFORMER);
 
     assertQ(req("id:1"), "//*[@numFound='1']");
     assertQ(req("id:2"), "//*[@numFound='0']");
@@ -196,7 +196,7 @@ public class TestDocBuilder2 extends AbstractDataImportHandlerTestCase {
     rows.add(createMap("name_s", "xyz", DocBuilder.SKIP_ROW, "true"));
     MockDataSource.setIterator("4", rows.iterator());
 
-    runFullImport(dataConfigWithTwoEntities);
+    runFullImport(DATA_CONFIG_WITH_TWO_ENTITIES);
     assertQ(req("id:3"), "//*[@numFound='1']");
     assertQ(req("id:4"), "//*[@numFound='1']");
     assertQ(req("name_s:abcd"), "//*[@numFound='1']");
@@ -211,7 +211,7 @@ public class TestDocBuilder2 extends AbstractDataImportHandlerTestCase {
     rows.add(createMap("id", "2", "desc", "two", "$stopTransform", "true"));
     MockDataSource.setIterator("select * from x", rows.iterator());
 
-    runFullImport(dataConfigForSkipTransform);
+    runFullImport(DATA_CONFIG_FOR_SKIP_TRANSFORM);
 
     assertQ(req("id:1"), "//*[@numFound='1']");
     assertQ(req("id:2"), "//*[@numFound='1']");
@@ -227,7 +227,7 @@ public class TestDocBuilder2 extends AbstractDataImportHandlerTestCase {
     rows.add(createMap("id", "3", "desc", "two", DocBuilder.DELETE_DOC_BY_ID, "2"));
     MockDataSource.setIterator("select * from x", rows.iterator());
 
-    runFullImport(dataConfigForSkipTransform);
+    runFullImport(DATA_CONFIG_FOR_SKIP_TRANSFORM);
 
     assertQ(req("id:1"), "//*[@numFound='1']");
     assertQ(req("id:2"), "//*[@numFound='0']");
@@ -243,7 +243,7 @@ public class TestDocBuilder2 extends AbstractDataImportHandlerTestCase {
     rows.add(createMap("id", "3", "desc", "two", DocBuilder.DELETE_DOC_BY_QUERY, "desc:one"));
     MockDataSource.setIterator("select * from x", rows.iterator());
 
-    runFullImport(dataConfigForSkipTransform);
+    runFullImport(DATA_CONFIG_FOR_SKIP_TRANSFORM);
 
     assertQ(req("id:1"), "//*[@numFound='0']");
     assertQ(req("id:2"), "//*[@numFound='0']");
@@ -256,7 +256,7 @@ public class TestDocBuilder2 extends AbstractDataImportHandlerTestCase {
     rows = new ArrayList();
     rows.add(createMap(DocBuilder.DELETE_DOC_BY_ID, "3"));
     MockDataSource.setIterator("select * from x", rows.iterator());
-    runFullImport(dataConfigForSkipTransform, createMap("clean","false"));
+    runFullImport(DATA_CONFIG_FOR_SKIP_TRANSFORM, createMap("clean","false"));
     assertQ(req("id:3"), "//*[@numFound='0']");
     
     assertTrue("Update request processor processDelete was not called", TestUpdateRequestProcessor.processDeleteCalled);
@@ -274,12 +274,12 @@ public class TestDocBuilder2 extends AbstractDataImportHandlerTestCase {
     createFile(tmpdir, "a.xml", "a.xml".getBytes(StandardCharsets.UTF_8), true);
     createFile(tmpdir, "b.xml", "b.xml".getBytes(StandardCharsets.UTF_8), true);
     createFile(tmpdir, "c.props", "c.props".getBytes(StandardCharsets.UTF_8), true);
-    runFullImport(dataConfigFileList, params);
+    runFullImport(DATA_CONFIG_FILE_LIST, params);
     assertQ(req("*:*"), "//*[@numFound='3']");
 
     // Add a new file after a full index is done
     createFile(tmpdir, "t.xml", "t.xml".getBytes(StandardCharsets.UTF_8), false);
-    runFullImport(dataConfigFileList, params);
+    runFullImport(DATA_CONFIG_FILE_LIST, params);
     // we should find only 1 because by default clean=true is passed
     // and this particular import should find only one file t.xml
     assertQ(req("*:*"), "//*[@numFound='1']");
@@ -342,7 +342,7 @@ public class TestDocBuilder2 extends AbstractDataImportHandlerTestCase {
     }
   }
 
-  private final String requestParamAsVariable = "<dataConfig>\n" +
+  private static final String REQUEST_PARAM_AS_VARIABLE = "<dataConfig>\n" +
           "    <dataSource type=\"MockDataSource\" />\n" +
           "    <document>\n" +
           "        <entity name=\"books\" query=\"select * from books where category='${dataimporter.request.category}'\">\n" +
@@ -352,7 +352,7 @@ public class TestDocBuilder2 extends AbstractDataImportHandlerTestCase {
           "    </document>\n" +
           "</dataConfig>";
 
-   private final String dataConfigWithDynamicTransformer = "<dataConfig> <dataSource type=\"MockDataSource\"/>\n" +
+   private static final String DATA_CONFIG_WITH_DYNAMIC_TRANSFORMER = "<dataConfig> <dataSource type=\"MockDataSource\"/>\n" +
           "    <document>\n" +
           "        <entity name=\"books\" query=\"select * from x\"" +
            "                transformer=\"TestDocBuilder2$AddDynamicFieldTransformer\">\n" +
@@ -362,7 +362,7 @@ public class TestDocBuilder2 extends AbstractDataImportHandlerTestCase {
           "    </document>\n" +
           "</dataConfig>";
 
-  private final String dataConfigForSkipTransform = "<dataConfig> <dataSource  type=\"MockDataSource\"/>\n" +
+  private static final String DATA_CONFIG_FOR_SKIP_TRANSFORM = "<dataConfig> <dataSource  type=\"MockDataSource\"/>\n" +
           "    <document>\n" +
           "        <entity name=\"books\" query=\"select * from x\"" +
            "                transformer=\"TemplateTransformer\">\n" +
@@ -373,7 +373,7 @@ public class TestDocBuilder2 extends AbstractDataImportHandlerTestCase {
           "    </document>\n" +
           "</dataConfig>";
 
-  private final String dataConfigWithTwoEntities = "<dataConfig><dataSource type=\"MockDataSource\"/>\n" +
+  private static final String DATA_CONFIG_WITH_TWO_ENTITIES = "<dataConfig><dataSource type=\"MockDataSource\"/>\n" +
           "    <document>\n" +
           "        <entity name=\"books\" query=\"select * from x\">" +
           "            <field column=\"id\" />\n" +
@@ -385,7 +385,7 @@ public class TestDocBuilder2 extends AbstractDataImportHandlerTestCase {
           "    </document>\n" +
           "</dataConfig>";
 
-  private final String dataConfigWithCaseInsensitiveFields = "<dataConfig> <dataSource  type=\"MockDataSource\"/>\n" +
+  private static final String DATA_CONFIG_WITH_CASE_INSENSITIVE_FIELDS = "<dataConfig> <dataSource  type=\"MockDataSource\"/>\n" +
           "    <document onImportStart=\"TestDocBuilder2$StartEventListener\" onImportEnd=\"TestDocBuilder2$EndEventListener\">\n" +
           "        <entity name=\"books\" query=\"select * from x\">\n" +
           "            <field column=\"ID\" />\n" +
@@ -394,7 +394,7 @@ public class TestDocBuilder2 extends AbstractDataImportHandlerTestCase {
           "    </document>\n" +
           "</dataConfig>";
 
-  private final String dataConfigWithErrorHandler = "<dataConfig> <dataSource  type=\"MockDataSource\"/>\n" +
+  private static final String DATA_CONFIG_WITH_ERROR_HANDLER = "<dataConfig> <dataSource  type=\"MockDataSource\"/>\n" +
           "    <document onError=\"TestDocBuilder2$ErrorEventListener\">\n" +
           "        <entity name=\"books\" query=\"select * from x\" transformer=\"TestDocBuilder2$ForcedExceptionTransformer\">\n" +
           "            <field column=\"id\" />\n" +
@@ -403,7 +403,7 @@ public class TestDocBuilder2 extends AbstractDataImportHandlerTestCase {
           "    </document>\n" +
           "</dataConfig>";
 
-  private final String dataConfigWithTemplatizedFieldNames = "<dataConfig><dataSource  type=\"MockDataSource\"/>\n" +
+  private static final String DATA_CONFIG_WITH_TEMPLATIZED_FIELD_NAMES = "<dataConfig><dataSource  type=\"MockDataSource\"/>\n" +
           "    <document>\n" +
           "        <entity name=\"books\" query=\"select * from x\">\n" +
           "            <field column=\"mypk\" name=\"${dih.request.mypk}\" />\n" +
@@ -412,7 +412,7 @@ public class TestDocBuilder2 extends AbstractDataImportHandlerTestCase {
           "    </document>\n" +
           "</dataConfig>";
 
-  private final String dataConfigWithDynamicFieldNames = "<dataConfig><dataSource  type=\"MockDataSource\"/>\n" +
+  private static final String DATA_CONFIG_WITH_DYNAMIC_FIELD_NAMES = "<dataConfig><dataSource  type=\"MockDataSource\"/>\n" +
       "    <document>\n" +
       "        <entity name=\"books\" query=\"select * from x\">\n" +
       "            <field column=\"mypk\" name=\"id\" />\n" +
@@ -421,7 +421,7 @@ public class TestDocBuilder2 extends AbstractDataImportHandlerTestCase {
       "    </document>\n" +
       "</dataConfig>";
 
-  private final String dataConfigFileList = "<dataConfig>\n" +
+  private static final String DATA_CONFIG_FILE_LIST = "<dataConfig>\n" +
           "\t<document>\n" +
           "\t\t<entity name=\"x\" processor=\"FileListEntityProcessor\" \n" +
           "\t\t\t\tfileName=\".*\" newerThan=\"${dih.last_index_time}\" \n" +

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestHierarchicalDocBuilder.java
----------------------------------------------------------------------
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestHierarchicalDocBuilder.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestHierarchicalDocBuilder.java
index 97c7714..603980a 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestHierarchicalDocBuilder.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestHierarchicalDocBuilder.java
@@ -155,7 +155,7 @@ public class TestHierarchicalDocBuilder extends AbstractDataImportHandlerTestCas
     
     int totalDocsNum = parentsNum + childrenNum + grandChildrenNum;
     
-    runFullImport(threeLevelHierarchyConfig);
+    runFullImport(THREE_LEVEL_HIERARCHY_CONFIG);
     
     assertTrue("Update request processor processAdd was not called", TestUpdateRequestProcessor.processAddCalled);
     assertTrue("Update request processor processCommit was not callled", TestUpdateRequestProcessor.processCommitCalled);
@@ -333,9 +333,9 @@ public class TestHierarchicalDocBuilder extends AbstractDataImportHandlerTestCas
     String children = createChildren(parentType, 0, depth, parentData, holder);
     
     String rootFields = createFieldsList(FIELD_ID, "desc", "type_s");
-    String rootEntity = StrUtils.formatString(rootEntityTemplate, parentType, "SELECT * FROM " + parentType, rootFields, children);
+    String rootEntity = StrUtils.formatString(ROOT_ENTITY_TEMPLATE, parentType, "SELECT * FROM " + parentType, rootFields, children);
 
-    String config = StrUtils.formatString(dataConfigTemplate, rootEntity);
+    String config = StrUtils.formatString(DATA_CONFIG_TEMPLATE, rootEntity);
     return config;
   }
   
@@ -396,7 +396,7 @@ public class TestHierarchicalDocBuilder extends AbstractDataImportHandlerTestCas
       List<Hierarchy> childData = createMockedIterator(childName, parentData, holder);
       
       String subChildren = createChildren(childName, currentLevel + 1, maxLevel, childData, holder);
-      String child = StrUtils.formatString(childEntityTemplate, childName, select, fields, subChildren);
+      String child = StrUtils.formatString(CHILD_ENTITY_TEMPLATE, childName, select, fields, subChildren);
       builder.append(child);
       builder.append('\n');
     }
@@ -414,7 +414,7 @@ public class TestHierarchicalDocBuilder extends AbstractDataImportHandlerTestCas
     return builder.toString();
   }
 
-  private final String threeLevelHierarchyConfig = "<dataConfig>\n" +
+  private static final String THREE_LEVEL_HIERARCHY_CONFIG = "<dataConfig>\n" +
       "  <dataSource type='MockDataSource' />\n" +
       "  <document>\n" +
       "    <entity name='PARENT' query='select * from PARENT'>\n" +
@@ -436,7 +436,7 @@ public class TestHierarchicalDocBuilder extends AbstractDataImportHandlerTestCas
       "</dataConfig>";
   
   /** {0} is rootEntity block **/
-  private final String dataConfigTemplate = "<dataConfig><dataSource type=\"MockDataSource\" />\n<document>\n {0}</document></dataConfig>";
+  private static final String DATA_CONFIG_TEMPLATE = "<dataConfig><dataSource type=\"MockDataSource\" />\n<document>\n {0}</document></dataConfig>";
   
   /** 
    * {0} - entityName, 
@@ -444,7 +444,7 @@ public class TestHierarchicalDocBuilder extends AbstractDataImportHandlerTestCas
    * {2} - fieldsList
    * {3} - childEntitiesList 
    **/
-  private final String rootEntityTemplate = "<entity name=\"{0}\" query=\"{1}\">\n{2} {3}\n</entity>\n";
+  private static final String ROOT_ENTITY_TEMPLATE = "<entity name=\"{0}\" query=\"{1}\">\n{2} {3}\n</entity>\n";
   
   /** 
    * {0} - entityName, 
@@ -452,7 +452,7 @@ public class TestHierarchicalDocBuilder extends AbstractDataImportHandlerTestCas
    * {2} - fieldsList
    * {3} - childEntitiesList 
    **/
-  private final String childEntityTemplate = "<entity " + ConfigNameConstants.CHILD + "=\"true\" name=\"{0}\" query=\"{1}\">\n {2} {3} </entity>\n";
+  private static final String CHILD_ENTITY_TEMPLATE = "<entity " + ConfigNameConstants.CHILD + "=\"true\" name=\"{0}\" query=\"{1}\">\n {2} {3} </entity>\n";
   
   private BitSetProducer createParentFilter(String type) {
     BooleanQuery.Builder parentQuery = new BooleanQuery.Builder();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/solr/core/src/java/org/apache/solr/cloud/DistributedMap.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/DistributedMap.java b/solr/core/src/java/org/apache/solr/cloud/DistributedMap.java
index 6e3ae1e..7518208 100644
--- a/solr/core/src/java/org/apache/solr/cloud/DistributedMap.java
+++ b/solr/core/src/java/org/apache/solr/cloud/DistributedMap.java
@@ -36,7 +36,7 @@ public class DistributedMap {
 
   protected SolrZkClient zookeeper;
 
-  protected final String prefix = "mn-";
+  protected static final String PREFIX = "mn-";
 
   public DistributedMap(SolrZkClient zookeeper, String dir) {
     this.dir = dir;
@@ -56,15 +56,15 @@ public class DistributedMap {
 
 
   public void put(String trackingId, byte[] data) throws KeeperException, InterruptedException {
-    zookeeper.makePath(dir + "/" + prefix + trackingId, data, CreateMode.PERSISTENT, null, false, true);
+    zookeeper.makePath(dir + "/" + PREFIX + trackingId, data, CreateMode.PERSISTENT, null, false, true);
   }
 
   public byte[] get(String trackingId) throws KeeperException, InterruptedException {
-    return zookeeper.getData(dir + "/" + prefix + trackingId, null, null, true);
+    return zookeeper.getData(dir + "/" + PREFIX + trackingId, null, null, true);
   }
 
   public boolean contains(String trackingId) throws KeeperException, InterruptedException {
-    return zookeeper.exists(dir + "/" + prefix + trackingId, true);
+    return zookeeper.exists(dir + "/" + PREFIX + trackingId, true);
   }
 
   public int size() throws KeeperException, InterruptedException {
@@ -80,7 +80,7 @@ public class DistributedMap {
    */
   public boolean remove(String trackingId) throws KeeperException, InterruptedException {
     try {
-      zookeeper.delete(dir + "/" + prefix + trackingId, -1, true);
+      zookeeper.delete(dir + "/" + PREFIX + trackingId, -1, true);
     } catch (KeeperException.NoNodeException e) {
       return false;
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/solr/core/src/java/org/apache/solr/cloud/OverseerTaskQueue.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerTaskQueue.java b/solr/core/src/java/org/apache/solr/cloud/OverseerTaskQueue.java
index e031303..92e34cf 100644
--- a/solr/core/src/java/org/apache/solr/cloud/OverseerTaskQueue.java
+++ b/solr/core/src/java/org/apache/solr/cloud/OverseerTaskQueue.java
@@ -42,7 +42,7 @@ import org.slf4j.LoggerFactory;
 public class OverseerTaskQueue extends DistributedQueue {
   private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
   
-  private final String response_prefix = "qnr-" ;
+  private static final String RESPONSE_PREFIX = "qnr-" ;
 
   public OverseerTaskQueue(SolrZkClient zookeeper, String dir) {
     this(zookeeper, dir, new Overseer.Stats());
@@ -88,7 +88,7 @@ public class OverseerTaskQueue extends DistributedQueue {
     Timer.Context time = stats.time(dir + "_remove_event");
     try {
       String path = event.getId();
-      String responsePath = dir + "/" + response_prefix
+      String responsePath = dir + "/" + RESPONSE_PREFIX
           + path.substring(path.lastIndexOf("-") + 1);
       if (zookeeper.exists(responsePath, true)) {
         zookeeper.setData(responsePath, event.getBytes(), true);
@@ -217,7 +217,7 @@ public class OverseerTaskQueue extends DistributedQueue {
 
   String createResponseNode() throws KeeperException, InterruptedException {
     return createData(
-            dir + "/" + response_prefix,
+            dir + "/" + RESPONSE_PREFIX,
             null, CreateMode.EPHEMERAL_SEQUENTIAL);
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/solr/core/src/java/org/apache/solr/schema/CurrencyField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/CurrencyField.java b/solr/core/src/java/org/apache/solr/schema/CurrencyField.java
index 9e994cf..7b27c3f 100644
--- a/solr/core/src/java/org/apache/solr/schema/CurrencyField.java
+++ b/solr/core/src/java/org/apache/solr/schema/CurrencyField.java
@@ -504,7 +504,7 @@ public class CurrencyField extends FieldType implements SchemaAware, ResourceLoa
       final FunctionValues currencies = currencyValues.getValues(context, reader);
 
       return new FunctionValues() {
-        private final int MAX_CURRENCIES_TO_CACHE = 256;
+        private static final int MAX_CURRENCIES_TO_CACHE = 256;
         private final int[] fractionDigitCache = new int[MAX_CURRENCIES_TO_CACHE];
         private final String[] currencyOrdToCurrencyCache = new String[MAX_CURRENCIES_TO_CACHE];
         private final double[] exchangeRateCache = new double[MAX_CURRENCIES_TO_CACHE];

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/solr/core/src/java/org/apache/solr/update/CommitTracker.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/CommitTracker.java b/solr/core/src/java/org/apache/solr/update/CommitTracker.java
index 9c09ebe..6cf7504 100644
--- a/solr/core/src/java/org/apache/solr/update/CommitTracker.java
+++ b/solr/core/src/java/org/apache/solr/update/CommitTracker.java
@@ -48,7 +48,7 @@ public final class CommitTracker implements Runnable {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
   
   // scheduler delay for maxDoc-triggered autocommits
-  public final int DOC_COMMIT_DELAY_MS = 1;
+  public static final int DOC_COMMIT_DELAY_MS = 1;
   
   // settings, not final so we can change them in testing
   private int docsUpperBound;
@@ -66,7 +66,7 @@ public final class CommitTracker implements Runnable {
 
   private final boolean softCommit;
   private boolean openSearcher;
-  private final boolean waitSearcher = true;
+  private static final boolean WAIT_SEARCHER = true;
 
   private String name;
   
@@ -205,7 +205,7 @@ public final class CommitTracker implements Runnable {
     try {
       CommitUpdateCommand command = new CommitUpdateCommand(req, false);
       command.openSearcher = openSearcher;
-      command.waitSearcher = waitSearcher;
+      command.waitSearcher = WAIT_SEARCHER;
       command.softCommit = softCommit;
       if (core.getCoreDescriptor().getCloudDescriptor() != null
           && core.getCoreDescriptor().getCloudDescriptor().isLeader()

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e80643e5/solr/core/src/java/org/apache/solr/util/SimplePostTool.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/util/SimplePostTool.java b/solr/core/src/java/org/apache/solr/util/SimplePostTool.java
index e642089..6e18cf9 100644
--- a/solr/core/src/java/org/apache/solr/util/SimplePostTool.java
+++ b/solr/core/src/java/org/apache/solr/util/SimplePostTool.java
@@ -1105,7 +1105,7 @@ public class SimplePostTool {
   //
   class PageFetcher {
     Map<String, List<String>> robotsCache;
-    final String DISALLOW = "Disallow:";
+    static final String DISALLOW = "Disallow:";
     
     public PageFetcher() {
       robotsCache = new HashMap<>();


[52/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-9959 Cleanup, add back and modify previously removed tests.

Posted by ab...@apache.org.
SOLR-9959 Cleanup, add back and modify previously removed tests.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/768524a4
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/768524a4
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/768524a4

Branch: refs/heads/jira/solr-9959
Commit: 768524a41751bc57c17a30766c5c585fe94da8ca
Parents: 14be5a9
Author: Andrzej Bialecki <ab...@apache.org>
Authored: Mon Apr 3 20:43:44 2017 +0200
Committer: Andrzej Bialecki <ab...@apache.org>
Committed: Mon Apr 3 20:43:44 2017 +0200

----------------------------------------------------------------------
 .../solr/metrics/reporters/SolrJmxReporter.java |   8 +
 .../org/apache/solr/util/stats/MetricUtils.java |  39 +++-
 .../cloud/CollectionsAPIDistributedZkTest.java  |   6 +-
 .../apache/solr/core/RequestHandlersTest.java   |  22 ++
 .../apache/solr/core/TestJmxIntegration.java    | 210 +++++++++++++++++++
 5 files changed, 274 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/768524a4/solr/core/src/java/org/apache/solr/metrics/reporters/SolrJmxReporter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/metrics/reporters/SolrJmxReporter.java b/solr/core/src/java/org/apache/solr/metrics/reporters/SolrJmxReporter.java
index e93acb9..c8562ae 100644
--- a/solr/core/src/java/org/apache/solr/metrics/reporters/SolrJmxReporter.java
+++ b/solr/core/src/java/org/apache/solr/metrics/reporters/SolrJmxReporter.java
@@ -209,6 +209,14 @@ public class SolrJmxReporter extends SolrMetricReporter {
     return mBeanServer;
   }
 
+  /**
+   * For unit tests.
+   * @return true if this reporter is actively reporting metrics to JMX.
+   */
+  public boolean isActive() {
+    return reporter != null;
+  }
+
   @Override
   public String toString() {
     return String.format(Locale.ENGLISH, "[%s@%s: domain = %s, service url = %s, agent id = %s]",

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/768524a4/solr/core/src/java/org/apache/solr/util/stats/MetricUtils.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/util/stats/MetricUtils.java b/solr/core/src/java/org/apache/solr/util/stats/MetricUtils.java
index e09cc88..4a7afaf 100644
--- a/solr/core/src/java/org/apache/solr/util/stats/MetricUtils.java
+++ b/solr/core/src/java/org/apache/solr/util/stats/MetricUtils.java
@@ -154,6 +154,7 @@ public class MetricUtils {
    * @param mustMatchFilter a {@link MetricFilter}.
    *                        A metric <em>must</em> match this filter to be included in the output.
    * @param skipHistograms discard any {@link Histogram}-s and histogram parts of {@link Timer}-s.
+   * @param skipAggregateValues discard internal values of {@link AggregateMetric}-s.
    * @param compact use compact representation for counters and gauges.
    * @param metadata optional metadata. If not null and not empty then this map will be added under a
    *                 {@code _metadata_} key.
@@ -207,12 +208,30 @@ public class MetricUtils {
         });
   }
 
+  /**
+   * Convert selected metrics from a registry into a map.
+   * @param registry registry
+   * @param names metric names
+   * @return map where keys are metric names (if they were present in the registry) and values are
+   * converted metrics.
+   */
   public static Map<String, Object> convertMetrics(MetricRegistry registry, Collection<String> names) {
     final Map<String, Object> metrics = new HashMap<>();
     convertMetrics(registry, names, false, true, true, (k, v) -> metrics.put(k, v));
     return metrics;
   }
 
+  /**
+   * Convert selected metrics from a registry into a map.
+   * @param registry registry
+   * @param names metric names
+   * @param skipHistograms discard any {@link Histogram}-s and histogram parts of {@link Timer}-s.
+   * @param skipAggregateValues discard internal values of {@link AggregateMetric}-s.
+   * @param compact use compact representation for counters and gauges.
+   * @param consumer consumer that accepts each produced metric name together
+   * with its converted representation (a {@link java.util.function.BiConsumer}
+   * of name and value); this method is {@code void} — results are delivered
+   * solely through the consumer, so no {@code @return} applies.
+   */
   public static void convertMetrics(MetricRegistry registry, Collection<String> names,
                                     boolean skipHistograms, boolean skipAggregateValues, boolean compact,
                                     BiConsumer<String, Object> consumer) {
@@ -224,7 +243,7 @@ public class MetricUtils {
         });
   }
 
-  public static void convertMetric(String n, Metric metric, boolean skipHistograms, boolean skipAggregateValues,
+  static void convertMetric(String n, Metric metric, boolean skipHistograms, boolean skipAggregateValues,
                               boolean compact, BiConsumer<String, Object> consumer) {
     if (metric instanceof Counter) {
       Counter counter = (Counter) metric;
@@ -257,7 +276,7 @@ public class MetricUtils {
     }
   }
 
-  public static Map<String, Object> convertAggregateMetric(AggregateMetric metric, boolean skipAggregateValues) {
+  static Map<String, Object> convertAggregateMetric(AggregateMetric metric, boolean skipAggregateValues) {
     Map<String, Object> response = new LinkedHashMap<>();
     response.put("count", metric.size());
     response.put(MAX, metric.getMax());
@@ -278,7 +297,7 @@ public class MetricUtils {
     return response;
   }
 
-  public static Map<String, Object> convertHistogram(Histogram histogram) {
+  static Map<String, Object> convertHistogram(Histogram histogram) {
     Map<String, Object> response = new LinkedHashMap<>();
     Snapshot snapshot = histogram.getSnapshot();
     response.put("count", histogram.getCount());
@@ -297,7 +316,7 @@ public class MetricUtils {
   }
 
   // some snapshots represent time in ns, other snapshots represent raw values (eg. chunk size)
-  public static void addSnapshot(Map<String, Object> response, Snapshot snapshot, boolean ms) {
+  static void addSnapshot(Map<String, Object> response, Snapshot snapshot, boolean ms) {
     response.put((ms ? MIN_MS: MIN), nsToMs(ms, snapshot.getMin()));
     response.put((ms ? MAX_MS: MAX), nsToMs(ms, snapshot.getMax()));
     response.put((ms ? MEAN_MS : MEAN), nsToMs(ms, snapshot.getMean()));
@@ -309,6 +328,12 @@ public class MetricUtils {
     response.put((ms ? P999_MS: P999), nsToMs(ms, snapshot.get999thPercentile()));
   }
 
+  /**
+   * Convert a {@link Timer} to a map.
+   * @param timer timer instance
+   * @param skipHistograms if true then discard the histogram part of the timer.
+   * @return a map containing timer properties.
+   */
   public static Map<String,Object> convertTimer(Timer timer, boolean skipHistograms) {
     Map<String, Object> response = new LinkedHashMap<>();
     response.put("count", timer.getCount());
@@ -323,7 +348,7 @@ public class MetricUtils {
     return response;
   }
 
-  public static Map<String, Object> convertMeter(Meter meter) {
+  static Map<String, Object> convertMeter(Meter meter) {
     Map<String, Object> response = new LinkedHashMap<>();
     response.put("count", meter.getCount());
     response.put("meanRate", meter.getMeanRate());
@@ -333,7 +358,7 @@ public class MetricUtils {
     return response;
   }
 
-  public static Object convertGauge(Gauge gauge, boolean compact) {
+  static Object convertGauge(Gauge gauge, boolean compact) {
     if (compact) {
       return gauge.getValue();
     } else {
@@ -343,7 +368,7 @@ public class MetricUtils {
     }
   }
 
-  public static Object convertCounter(Counter counter, boolean compact) {
+  static Object convertCounter(Counter counter, boolean compact) {
     if (compact) {
       return counter.getCount();
     } else {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/768524a4/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
index 80a4185..cd711cf 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
@@ -631,7 +631,6 @@ public class CollectionsAPIDistributedZkTest extends SolrCloudTestCase {
 
         try {
           Map<String, String> props = mbean.getKeyPropertyList();
-          MBeanAttributeInfo[] attrs = server.getMBeanInfo(mbean).getAttributes();
           String category = props.get("category");
           String name = props.get("name");
           if ((category != null && category.toString().equals(Category.CORE.toString())) &&
@@ -639,13 +638,12 @@ public class CollectionsAPIDistributedZkTest extends SolrCloudTestCase {
             String indexDir = server.getAttribute(mbean, "Value").toString();
             String key = props.get("dom2") + "." + props.get("dom3") + "." + props.get("dom4");
             if (!indexDirToShardNamesMap.containsKey(indexDir)) {
-              indexDirToShardNamesMap.put(indexDir.toString(), new HashSet<String>());
+              indexDirToShardNamesMap.put(indexDir.toString(), new HashSet<>());
             }
             indexDirToShardNamesMap.get(indexDir.toString()).add(key);
           }
         } catch (Exception e) {
-          log.info(e.toString());
-          // ignore, just continue - probably a "category" or "source" attribute
+          // ignore, just continue - probably a "Value" attribute
           // not found
         }
       }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/768524a4/solr/core/src/test/org/apache/solr/core/RequestHandlersTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/core/RequestHandlersTest.java b/solr/core/src/test/org/apache/solr/core/RequestHandlersTest.java
index 2aa9916..3c13645 100644
--- a/solr/core/src/test/org/apache/solr/core/RequestHandlersTest.java
+++ b/solr/core/src/test/org/apache/solr/core/RequestHandlersTest.java
@@ -16,10 +16,13 @@
  */
 package org.apache.solr.core;
 
+import java.util.Map;
+
 import com.codahale.metrics.Gauge;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.metrics.SolrMetricManager;
 import org.apache.solr.request.SolrRequestHandler;
+import org.apache.solr.util.stats.MetricUtils;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -96,4 +99,23 @@ public class RequestHandlersTest extends SolrTestCaseJ4 {
     
     assertNull( core.getRequestHandler("/update/asdgadsgas" ) ); // prefix
   }
+
+  @Test
+  public void testStatistics() {
+    SolrCore core = h.getCore();
+    SolrRequestHandler updateHandler = core.getRequestHandler("/update");
+    SolrRequestHandler termHandler = core.getRequestHandler("/terms");
+
+    assertU(adoc("id", "47",
+        "text", "line up and fly directly at the enemy death cannons, clogging them with wreckage!"));
+    assertU(commit());
+
+    Map<String,Object> updateStats = MetricUtils.convertMetrics(updateHandler.getMetricRegistry(), updateHandler.getMetricNames());
+    Map<String,Object> termStats = MetricUtils.convertMetrics(termHandler.getMetricRegistry(), termHandler.getMetricNames());
+
+    Long updateTime = (Long) updateStats.get("UPDATE./update.totalTime");
+    Long termTime = (Long) termStats.get("QUERY./terms.totalTime");
+
+    assertFalse("RequestHandlers should not share statistics!", updateTime.equals(termTime));
+  }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/768524a4/solr/core/src/test/org/apache/solr/core/TestJmxIntegration.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/core/TestJmxIntegration.java b/solr/core/src/test/org/apache/solr/core/TestJmxIntegration.java
new file mode 100644
index 0000000..5a54b64
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/core/TestJmxIntegration.java
@@ -0,0 +1,210 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.core;
+
+import org.apache.solr.metrics.SolrMetricManager;
+import org.apache.solr.metrics.SolrMetricReporter;
+import org.apache.solr.metrics.reporters.JmxObjectNameFactory;
+import org.apache.solr.metrics.reporters.SolrJmxReporter;
+import org.apache.solr.util.AbstractSolrTestCase;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.management.AttributeNotFoundException;
+import javax.management.MBeanAttributeInfo;
+import javax.management.MBeanInfo;
+import javax.management.MBeanServer;
+import javax.management.MalformedObjectNameException;
+import javax.management.ObjectInstance;
+import javax.management.ObjectName;
+import java.lang.invoke.MethodHandles;
+import java.lang.management.ManagementFactory;
+import java.util.Hashtable;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Test for JMX Integration
+ *
+ *
+ * @since solr 1.3
+ */
+public class TestJmxIntegration extends AbstractSolrTestCase {
+
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  private static MBeanServer mbeanServer = null;
+  private static JmxObjectNameFactory nameFactory = null;
+  private static String registryName = null;
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    // Make sure that at least one MBeanServer is available
+    // prior to initializing the core
+    //
+    // (test configs are setup to use existing server if any, 
+    // otherwise skip JMX)
+    MBeanServer platformServer = ManagementFactory.getPlatformMBeanServer();
+
+    initCore("solrconfig.xml", "schema.xml");
+
+    // we should be able to see that the core has JmxIntegration enabled
+    registryName = h.getCore().getCoreMetricManager().getRegistryName();
+    SolrMetricManager manager = h.getCoreContainer().getMetricManager();
+    Map<String,SolrMetricReporter> reporters = manager.getReporters(registryName);
+    assertEquals(1, reporters.size());
+    SolrMetricReporter reporter = reporters.values().iterator().next();
+    assertTrue(reporter instanceof SolrJmxReporter);
+    SolrJmxReporter jmx = (SolrJmxReporter)reporter;
+    assertTrue("JMX not enabled", jmx.isActive());
+    // and we should be able to see that the reporter
+    // refers to the JMX server we started
+
+    mbeanServer = jmx.getMBeanServer();
+
+    assertNotNull("No JMX server found in the reporter",
+        mbeanServer);
+
+    // NOTE: we can't guarantee that "mbeanServer == platformServer"
+    // the JVM may have multiple MBean servers running when the test started
+    // and the contract of not specifying one when configuring solr.xml without
+    // agetnId or serviceUrl is that it will use whatever the "first" MBean server
+    // returned by the JVM is.
+
+    nameFactory = new JmxObjectNameFactory("default", registryName);
+  }
+
+  @AfterClass
+  public static void afterClass() throws Exception {
+    mbeanServer = null;
+  }
+
+  @Test
+  public void testJmxRegistration() throws Exception {
+    assertTrue("No MBeans found in server", mbeanServer.getMBeanCount() > 0);
+
+    Set<ObjectInstance> objects = mbeanServer.queryMBeans(null, null);
+    assertFalse("No objects found in mbean server", objects
+        .isEmpty());
+    int numDynamicMbeans = 0;
+    for (ObjectInstance o : objects) {
+      ObjectName name = o.getObjectName();
+      assertNotNull("Null name on: " + o.toString(), name);
+      MBeanInfo mbeanInfo = mbeanServer.getMBeanInfo(name);
+      if (name.getDomain().equals("solr")) {
+        numDynamicMbeans++;
+        MBeanAttributeInfo[] attrs = mbeanInfo.getAttributes();
+        if (name.getKeyProperty("name").equals("fetcher")) { // no attributes without active replication
+          continue;
+        }
+        assertTrue("No Attributes found for mbean: " + o.getObjectName() + ", " + mbeanInfo,
+            0 < attrs.length);
+        for (MBeanAttributeInfo attr : attrs) {
+          // ensure every advertised attribute is gettable
+          try {
+            Object trash = mbeanServer.getAttribute(o.getObjectName(), attr.getName());
+          } catch (javax.management.AttributeNotFoundException e) {
+            throw new RuntimeException("Unable to featch attribute for " + o.getObjectName()
+                + ": " + attr.getName(), e);
+          }
+        }
+      }
+    }
+    assertTrue("No MBeans found", 0 < numDynamicMbeans);
+  }
+
+  @Test
+  public void testJmxUpdate() throws Exception {
+
+    SolrInfoBean bean = null;
+    // wait until searcher is registered
+    for (int i=0; i<100; i++) {
+      bean = h.getCore().getInfoRegistry().get("searcher");
+      if (bean != null) break;
+      Thread.sleep(250);
+    }
+    if (bean==null) throw new RuntimeException("searcher was never registered");
+    // nocommit
+    ObjectName searcher = nameFactory.createName("gauge", registryName, "CORE.searcher.*");
+
+    log.info("Mbeans in server: " + mbeanServer.queryNames(null, null));
+
+    assertFalse("No mbean found for SolrIndexSearcher", mbeanServer.queryMBeans(searcher, null).isEmpty());
+
+    int oldNumDocs =  (Integer)mbeanServer.getAttribute(searcher, "numDocs");
+    assertU(adoc("id", "1"));
+    assertU("commit", commit());
+    int numDocs = (Integer)mbeanServer.getAttribute(searcher, "numDocs");
+    assertTrue("New numDocs is same as old numDocs as reported by JMX",
+        numDocs > oldNumDocs);
+  }
+
+  @Test @Ignore("timing problem? https://issues.apache.org/jira/browse/SOLR-2715")
+  public void testJmxOnCoreReload() throws Exception {
+
+    String coreName = h.getCore().getName();
+
+    Set<ObjectInstance> oldBeans = mbeanServer.queryMBeans(null, null);
+    int oldNumberOfObjects = 0;
+    for (ObjectInstance bean : oldBeans) {
+      try {
+        if (String.valueOf(h.getCore().hashCode()).equals(mbeanServer.getAttribute(bean.getObjectName(), "coreHashCode"))) {
+          oldNumberOfObjects++;
+        }
+      } catch (AttributeNotFoundException e) {
+        // expected
+      }
+    }
+
+    log.info("Before Reload: Size of infoRegistry: " + h.getCore().getInfoRegistry().size() + " MBeans: " + oldNumberOfObjects);
+    assertEquals("Number of registered MBeans is not the same as info registry size", h.getCore().getInfoRegistry().size(), oldNumberOfObjects);
+
+    h.getCoreContainer().reload(coreName);
+
+    Set<ObjectInstance> newBeans = mbeanServer.queryMBeans(null, null);
+    int newNumberOfObjects = 0;
+    int registrySize = 0;
+    try (SolrCore core = h.getCoreContainer().getCore(coreName)) {
+      registrySize = core.getInfoRegistry().size();
+      for (ObjectInstance bean : newBeans) {
+        try {
+          if (String.valueOf(core.hashCode()).equals(mbeanServer.getAttribute(bean.getObjectName(), "coreHashCode"))) {
+            newNumberOfObjects++;
+          }
+        } catch (AttributeNotFoundException e) {
+          // expected
+        }
+      }
+    }
+
+    log.info("After Reload: Size of infoRegistry: " + registrySize + " MBeans: " + newNumberOfObjects);
+    assertEquals("Number of registered MBeans is not the same as info registry size", registrySize, newNumberOfObjects);
+  }
+
+  private ObjectName getObjectName(String key, SolrInfoBean infoBean)
+      throws MalformedObjectNameException {
+    Hashtable<String, String> map = new Hashtable<>();
+    map.put("type", key);
+    map.put("id", infoBean.getName());
+    String coreName = h.getCore().getName();
+    return ObjectName.getInstance(("solr" + (null != coreName ? "/" + coreName : "")), map);
+  }
+}
\ No newline at end of file


[08/52] [abbrv] lucene-solr:jira/solr-9959: LUCENE-7754: Inner classes should be static whenever possible.

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/lucene/tools/src/java/org/apache/lucene/validation/LibVersionsCheckTask.java
----------------------------------------------------------------------
diff --git a/lucene/tools/src/java/org/apache/lucene/validation/LibVersionsCheckTask.java b/lucene/tools/src/java/org/apache/lucene/validation/LibVersionsCheckTask.java
index 3179c78..d7844b0 100644
--- a/lucene/tools/src/java/org/apache/lucene/validation/LibVersionsCheckTask.java
+++ b/lucene/tools/src/java/org/apache/lucene/validation/LibVersionsCheckTask.java
@@ -150,7 +150,7 @@ public class LibVersionsCheckTask extends Task {
    */
   private Map<String,HashSet<String>> ignoreConflictVersions = new HashMap<>();
 
-  private class Dependency {
+  private static class Dependency {
     String org;
     String name;
     String directVersion;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/contrib/dataimporthandler-extras/src/java/org/apache/solr/handler/dataimport/MailEntityProcessor.java
----------------------------------------------------------------------
diff --git a/solr/contrib/dataimporthandler-extras/src/java/org/apache/solr/handler/dataimport/MailEntityProcessor.java b/solr/contrib/dataimporthandler-extras/src/java/org/apache/solr/handler/dataimport/MailEntityProcessor.java
index 7545eac..d4418da 100644
--- a/solr/contrib/dataimporthandler-extras/src/java/org/apache/solr/handler/dataimport/MailEntityProcessor.java
+++ b/solr/contrib/dataimporthandler-extras/src/java/org/apache/solr/handler/dataimport/MailEntityProcessor.java
@@ -728,8 +728,8 @@ public class MailEntityProcessor extends EntityProcessorBase {
       return true;
     }
   }
-  
-  class MailsSinceLastCheckFilter implements CustomFilter {
+
+  static class MailsSinceLastCheckFilter implements CustomFilter {
     
     private Date since;
     

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/VariableResolver.java
----------------------------------------------------------------------
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/VariableResolver.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/VariableResolver.java
index 51b5841..8fced58 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/VariableResolver.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/VariableResolver.java
@@ -167,7 +167,7 @@ public class VariableResolver {
     return TemplateUpdateProcessorFactory.getVariables(expr, cache);
   }
 
-  class CurrentLevel {
+  static class CurrentLevel {
     final Map<String,Object> map;
     final int level;
     CurrentLevel(int level, Map<String,Object> map) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/AbstractDIHCacheTestCase.java
----------------------------------------------------------------------
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/AbstractDIHCacheTestCase.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/AbstractDIHCacheTestCase.java
index b72f379..2ef5a91 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/AbstractDIHCacheTestCase.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/AbstractDIHCacheTestCase.java
@@ -73,7 +73,7 @@ public class AbstractDIHCacheTestCase {
 
   //A limitation of this test class is that the primary key needs to be the first one in the list.
   //DIHCaches, however, can handle any field being the primary key.
-  class ControlData implements Comparable<ControlData>, Iterable<Object> {
+  static class ControlData implements Comparable<ControlData>, Iterable<Object> {
     Object[] data;
 
     ControlData(Object[] data) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/AbstractSqlEntityProcessorTestCase.java
----------------------------------------------------------------------
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/AbstractSqlEntityProcessorTestCase.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/AbstractSqlEntityProcessorTestCase.java
index 06b89c3..2774044 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/AbstractSqlEntityProcessorTestCase.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/AbstractSqlEntityProcessorTestCase.java
@@ -568,8 +568,8 @@ public abstract class AbstractSqlEntityProcessorTestCase extends
     }
     return changeSet.toArray(new String[changeSet.size()]);
   }
-  
-  class IntChanges {
+
+  static class IntChanges {
     public Integer[] changedKeys;
     public Integer[] deletedKeys;
     public Integer[] addedKeys;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestContentStreamDataSource.java
----------------------------------------------------------------------
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestContentStreamDataSource.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestContentStreamDataSource.java
index 95af5f9..089a133 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestContentStreamDataSource.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestContentStreamDataSource.java
@@ -109,7 +109,7 @@ public class TestContentStreamDataSource extends AbstractDataImportHandlerTestCa
     fail("Commit should have occured but it did not");
   }
   
-  private class SolrInstance {
+  private static class SolrInstance {
     String name;
     Integer port;
     File homeDir;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/contrib/extraction/src/java/org/apache/solr/handler/extraction/XLSXResponseWriter.java
----------------------------------------------------------------------
diff --git a/solr/contrib/extraction/src/java/org/apache/solr/handler/extraction/XLSXResponseWriter.java b/solr/contrib/extraction/src/java/org/apache/solr/handler/extraction/XLSXResponseWriter.java
index 27a30d1..92bd01f 100644
--- a/solr/contrib/extraction/src/java/org/apache/solr/handler/extraction/XLSXResponseWriter.java
+++ b/solr/contrib/extraction/src/java/org/apache/solr/handler/extraction/XLSXResponseWriter.java
@@ -98,7 +98,7 @@ class XLSXWriter extends TextResponseWriter {
   SolrQueryRequest req;
   SolrQueryResponse rsp;
 
-  class SerialWriteWorkbook {
+  static class SerialWriteWorkbook {
     SXSSFWorkbook swb;
     Sheet sh;
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/contrib/ltr/src/java/org/apache/solr/ltr/LTRScoringQuery.java
----------------------------------------------------------------------
diff --git a/solr/contrib/ltr/src/java/org/apache/solr/ltr/LTRScoringQuery.java b/solr/contrib/ltr/src/java/org/apache/solr/ltr/LTRScoringQuery.java
index 37990ea..6ecbb6f 100644
--- a/solr/contrib/ltr/src/java/org/apache/solr/ltr/LTRScoringQuery.java
+++ b/solr/contrib/ltr/src/java/org/apache/solr/ltr/LTRScoringQuery.java
@@ -300,7 +300,7 @@ public class LTRScoringQuery extends Query {
     return field;
   }
 
-  public class FeatureInfo {
+  public static class FeatureInfo {
     final private String name;
     private float value;
     private boolean used;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/contrib/ltr/src/test/org/apache/solr/ltr/TestLTRReRankingPipeline.java
----------------------------------------------------------------------
diff --git a/solr/contrib/ltr/src/test/org/apache/solr/ltr/TestLTRReRankingPipeline.java b/solr/contrib/ltr/src/test/org/apache/solr/ltr/TestLTRReRankingPipeline.java
index a98fc4f..75d3538 100644
--- a/solr/contrib/ltr/src/test/org/apache/solr/ltr/TestLTRReRankingPipeline.java
+++ b/solr/contrib/ltr/src/test/org/apache/solr/ltr/TestLTRReRankingPipeline.java
@@ -80,7 +80,7 @@ public class TestLTRReRankingPipeline extends LuceneTestCase {
     return features;
   }
 
-  private class MockModel extends LTRScoringModel {
+  private static class MockModel extends LTRScoringModel {
 
     public MockModel(String name, List<Feature> features,
         List<Normalizer> norms,

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/contrib/velocity/src/java/org/apache/solr/response/VelocityResponseWriter.java
----------------------------------------------------------------------
diff --git a/solr/contrib/velocity/src/java/org/apache/solr/response/VelocityResponseWriter.java b/solr/contrib/velocity/src/java/org/apache/solr/response/VelocityResponseWriter.java
index e7cc9b7..133bc63 100644
--- a/solr/contrib/velocity/src/java/org/apache/solr/response/VelocityResponseWriter.java
+++ b/solr/contrib/velocity/src/java/org/apache/solr/response/VelocityResponseWriter.java
@@ -376,7 +376,7 @@ public class VelocityResponseWriter implements QueryResponseWriter, SolrCoreAwar
   }
 
   // see: http://svn.apache.org/repos/asf/velocity/tools/branches/2.0.x/src/main/java/org/apache/velocity/tools/generic/ResourceTool.java
-  private class SolrVelocityResourceTool extends ResourceTool {
+  private static class SolrVelocityResourceTool extends ResourceTool {
 
     private ClassLoader solrClassLoader;
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/java/org/apache/solr/cloud/Overseer.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/Overseer.java b/solr/core/src/java/org/apache/solr/cloud/Overseer.java
index e0449b4..4d3cee7 100644
--- a/solr/core/src/java/org/apache/solr/cloud/Overseer.java
+++ b/solr/core/src/java/org/apache/solr/cloud/Overseer.java
@@ -431,7 +431,7 @@ public class Overseer implements Closeable {
 
   }
 
-  class OverseerThread extends Thread implements Closeable {
+  static class OverseerThread extends Thread implements Closeable {
 
     protected volatile boolean isClosed;
     private Closeable thread;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/java/org/apache/solr/cloud/OverseerTaskQueue.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerTaskQueue.java b/solr/core/src/java/org/apache/solr/cloud/OverseerTaskQueue.java
index c7604d6..e031303 100644
--- a/solr/core/src/java/org/apache/solr/cloud/OverseerTaskQueue.java
+++ b/solr/core/src/java/org/apache/solr/cloud/OverseerTaskQueue.java
@@ -108,7 +108,7 @@ public class OverseerTaskQueue extends DistributedQueue {
   /**
    * Watcher that blocks until a WatchedEvent occurs for a znode.
    */
-  private final class LatchWatcher implements Watcher {
+  private static final class LatchWatcher implements Watcher {
 
     private final Object lock;
     private WatchedEvent event;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/java/org/apache/solr/cloud/ZkController.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/ZkController.java b/solr/core/src/java/org/apache/solr/cloud/ZkController.java
index 69a77f9..a3f1fd4 100644
--- a/solr/core/src/java/org/apache/solr/cloud/ZkController.java
+++ b/solr/core/src/java/org/apache/solr/cloud/ZkController.java
@@ -239,7 +239,7 @@ public class ZkController {
   }
 
   // notifies registered listeners after the ZK reconnect in the background
-  private class OnReconnectNotifyAsync implements Callable {
+  private static class OnReconnectNotifyAsync implements Callable {
 
     private final OnReconnect listener;
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/java/org/apache/solr/core/CachingDirectoryFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/core/CachingDirectoryFactory.java b/solr/core/src/java/org/apache/solr/core/CachingDirectoryFactory.java
index 08a8a4e..e710063 100644
--- a/solr/core/src/java/org/apache/solr/core/CachingDirectoryFactory.java
+++ b/solr/core/src/java/org/apache/solr/core/CachingDirectoryFactory.java
@@ -48,7 +48,7 @@ import org.slf4j.LoggerFactory;
  * 
  */
 public abstract class CachingDirectoryFactory extends DirectoryFactory {
-  protected class CacheValue {
+  protected static class CacheValue {
     final public String path;
     final public Directory directory;
     // for debug

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/java/org/apache/solr/handler/CdcrReplicator.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/CdcrReplicator.java b/solr/core/src/java/org/apache/solr/handler/CdcrReplicator.java
index 75a787b..a151a43 100644
--- a/solr/core/src/java/org/apache/solr/handler/CdcrReplicator.java
+++ b/solr/core/src/java/org/apache/solr/handler/CdcrReplicator.java
@@ -207,7 +207,7 @@ public class CdcrReplicator implements Runnable {
   /**
    * Exception to catch update request issues with the target cluster.
    */
-  public class CdcrReplicatorException extends Exception {
+  public static class CdcrReplicatorException extends Exception {
 
     private final UpdateRequest req;
     private final UpdateResponse rsp;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/java/org/apache/solr/handler/CdcrReplicatorState.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/CdcrReplicatorState.java b/solr/core/src/java/org/apache/solr/handler/CdcrReplicatorState.java
index 2ca0d80..bf80608 100644
--- a/solr/core/src/java/org/apache/solr/handler/CdcrReplicatorState.java
+++ b/solr/core/src/java/org/apache/solr/handler/CdcrReplicatorState.java
@@ -197,7 +197,7 @@ class CdcrReplicatorState {
 
   }
 
-  class BenchmarkTimer {
+  static class BenchmarkTimer {
 
     private long startTime;
     private long runTime = 0;
@@ -266,7 +266,7 @@ class CdcrReplicatorState {
 
   }
 
-  private class ErrorQueueEntry {
+  private static class ErrorQueueEntry {
 
     private ErrorType type;
     private Date timestamp;
@@ -277,7 +277,7 @@ class CdcrReplicatorState {
     }
   }
 
-  private class FixedQueue<E> extends LinkedList<E> {
+  private static class FixedQueue<E> extends LinkedList<E> {
 
     private int maxSize;
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/java/org/apache/solr/handler/ExportWriter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/ExportWriter.java b/solr/core/src/java/org/apache/solr/handler/ExportWriter.java
index 7602d9e..bd43d5f 100644
--- a/solr/core/src/java/org/apache/solr/handler/ExportWriter.java
+++ b/solr/core/src/java/org/apache/solr/handler/ExportWriter.java
@@ -939,7 +939,7 @@ public class ExportWriter implements SolrCore.RawWriter, Closeable {
     public int resetValue();
   }
 
-  class IntDesc implements IntComp {
+  static class IntDesc implements IntComp {
 
     public int resetValue() {
       return Integer.MIN_VALUE;
@@ -956,7 +956,7 @@ public class ExportWriter implements SolrCore.RawWriter, Closeable {
     }
   }
 
-  class IntAsc implements IntComp {
+  static class IntAsc implements IntComp {
 
     public int resetValue() {
       return Integer.MAX_VALUE;
@@ -1032,7 +1032,7 @@ public class ExportWriter implements SolrCore.RawWriter, Closeable {
     public long resetValue();
   }
 
-  class LongDesc implements LongComp {
+  static class LongDesc implements LongComp {
 
     public long resetValue() {
       return Long.MIN_VALUE;
@@ -1049,7 +1049,7 @@ public class ExportWriter implements SolrCore.RawWriter, Closeable {
     }
   }
 
-  class LongAsc implements LongComp {
+  static class LongAsc implements LongComp {
 
     public long resetValue() {
       return Long.MAX_VALUE;
@@ -1125,7 +1125,7 @@ public class ExportWriter implements SolrCore.RawWriter, Closeable {
     public float resetValue();
   }
 
-  public class FloatDesc implements FloatComp {
+  public static class FloatDesc implements FloatComp {
     public float resetValue() {
       return -Float.MAX_VALUE;
     }
@@ -1141,7 +1141,7 @@ public class ExportWriter implements SolrCore.RawWriter, Closeable {
     }
   }
 
-  public class FloatAsc implements FloatComp {
+  public static class FloatAsc implements FloatComp {
     public float resetValue() {
       return Float.MAX_VALUE;
     }
@@ -1219,7 +1219,7 @@ public class ExportWriter implements SolrCore.RawWriter, Closeable {
     public double resetValue();
   }
 
-  public class DoubleDesc implements DoubleComp {
+  public static class DoubleDesc implements DoubleComp {
     public double resetValue() {
       return -Double.MAX_VALUE;
     }
@@ -1235,7 +1235,7 @@ public class ExportWriter implements SolrCore.RawWriter, Closeable {
     }
   }
 
-  public class DoubleAsc implements DoubleComp {
+  public static class DoubleAsc implements DoubleComp {
     public double resetValue() {
       return Double.MAX_VALUE;
     }
@@ -1712,7 +1712,7 @@ public class ExportWriter implements SolrCore.RawWriter, Closeable {
     }
   }
 
-  public class IgnoreException extends IOException {
+  public static class IgnoreException extends IOException {
     public void printStackTrace(PrintWriter pw) {
       pw.print("Early Client Disconnect");
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java b/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
index 5efb6c5..d79effd 100644
--- a/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
+++ b/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
@@ -1690,7 +1690,7 @@ public class IndexFetcher {
     }
   }
 
-  private class DirectoryFile implements FileInterface {
+  private static class DirectoryFile implements FileInterface {
     private final String saveAs;
     private Directory copy2Dir;
     private IndexOutput outStream;
@@ -1725,7 +1725,7 @@ public class IndexFetcher {
     }
   }
 
-  private class LocalFsFile implements FileInterface {
+  private static class LocalFsFile implements FileInterface {
     private File copy2Dir;
 
     FileChannel fileChannel;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/java/org/apache/solr/handler/SQLHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/SQLHandler.java b/solr/core/src/java/org/apache/solr/handler/SQLHandler.java
index 7563fe8..f307baa 100644
--- a/solr/core/src/java/org/apache/solr/handler/SQLHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/SQLHandler.java
@@ -141,7 +141,7 @@ public class SQLHandler extends RequestHandlerBase implements SolrCoreAware, Per
   /*
    * Only necessary for SolrJ JDBC driver since metadata has to be passed back
    */
-  private class SqlHandlerStream extends JDBCStream {
+  private static class SqlHandlerStream extends JDBCStream {
     private final boolean includeMetadata;
     private boolean firstTuple = true;
     List<String> metadataFields = new ArrayList<>();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java b/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
index 8078bdc..f6ecd8d 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
@@ -521,7 +521,7 @@ public class ExpandComponent extends SearchComponent implements PluginInfoInitia
     rb.rsp.add("expanded", expanded);
   }
 
-  private class GroupExpandCollector implements Collector, GroupCollector {
+  private static class GroupExpandCollector implements Collector, GroupCollector {
     private SortedDocValues docValues;
     private MultiDocValues.OrdinalMap ordinalMap;
     private SortedDocValues segmentValues;
@@ -614,7 +614,7 @@ public class ExpandComponent extends SearchComponent implements PluginInfoInitia
     }
   }
 
-  private class NumericGroupExpandCollector implements Collector, GroupCollector {
+  private static class NumericGroupExpandCollector implements Collector, GroupCollector {
     private NumericDocValues docValues;
 
     private String field;
@@ -763,7 +763,7 @@ public class ExpandComponent extends SearchComponent implements PluginInfoInitia
 
   // this reader alters the content of the given reader so it should not
   // delegate the caching stuff
-  private class ReaderWrapper extends FilterLeafReader {
+  private static class ReaderWrapper extends FilterLeafReader {
 
     private String field;
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java b/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java
index 83a2d60..1c016c7 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandlerFactory.java
@@ -283,7 +283,7 @@ public class HttpShardHandlerFactory extends ShardHandlerFactory implements org.
    * If all nodes prefer local-cores then a bad/heavily-loaded node will receive less requests from healthy nodes.
    * This will help prevent a distributed deadlock or timeouts in all the healthy nodes due to one bad node.
    */
-  private class IsOnPreferredHostComparator implements Comparator<Object> {
+  private static class IsOnPreferredHostComparator implements Comparator<Object> {
     final private String preferredHostAddress;
     public IsOnPreferredHostComparator(String preferredHostAddress) {
       this.preferredHostAddress = preferredHostAddress;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/java/org/apache/solr/handler/component/PivotFacetFieldValueCollection.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/PivotFacetFieldValueCollection.java b/solr/core/src/java/org/apache/solr/handler/component/PivotFacetFieldValueCollection.java
index 6aae231..5c2b07f 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/PivotFacetFieldValueCollection.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/PivotFacetFieldValueCollection.java
@@ -301,7 +301,7 @@ public class PivotFacetFieldValueCollection implements Iterable<PivotFacetValue>
   }
     
   /** Sorts {@link PivotFacetValue} instances by their count */
-  public class PivotFacetCountComparator implements Comparator<PivotFacetValue> {    
+  public static class PivotFacetCountComparator implements Comparator<PivotFacetValue> {
     public int compare(PivotFacetValue left, PivotFacetValue right) {
       int countCmp = right.getCount() - left.getCount();
       return (0 != countCmp) ? countCmp : 
@@ -310,7 +310,7 @@ public class PivotFacetFieldValueCollection implements Iterable<PivotFacetValue>
   }
   
   /** Sorts {@link PivotFacetValue} instances by their value */
-  public class PivotFacetValueComparator implements Comparator<PivotFacetValue> {
+  public static class PivotFacetValueComparator implements Comparator<PivotFacetValue> {
     public int compare(PivotFacetValue left, PivotFacetValue right) {
       return compareWithNullLast(left.getValue(), right.getValue());
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/java/org/apache/solr/handler/component/TermsComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/TermsComponent.java b/solr/core/src/java/org/apache/solr/handler/component/TermsComponent.java
index b76537a..e00120c 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/TermsComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/TermsComponent.java
@@ -353,7 +353,7 @@ public class TermsComponent extends SearchComponent {
     return sreq;
   }
 
-  public class TermsHelper {
+  public static class TermsHelper {
     // map to store returned terms
     private HashMap<String, HashMap<String, TermsResponse.Term>> fieldmap;
     private SolrParams params;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java b/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java
index e4ada59..098e1f7 100644
--- a/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java
+++ b/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java
@@ -750,7 +750,7 @@ public class DefaultSolrHighlighter extends SolrHighlighter implements PluginInf
   }
 
   // Wraps FVH to allow pass-by-reference. Public access to allow use in 3rd party subclasses
-  public class FvhContainer {
+  public static class FvhContainer {
     FastVectorHighlighter fvh;
     FieldQuery fieldQuery;
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/java/org/apache/solr/schema/ManagedIndexSchema.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/ManagedIndexSchema.java b/solr/core/src/java/org/apache/solr/schema/ManagedIndexSchema.java
index b22feab..9bc888d 100644
--- a/solr/core/src/java/org/apache/solr/schema/ManagedIndexSchema.java
+++ b/solr/core/src/java/org/apache/solr/schema/ManagedIndexSchema.java
@@ -368,13 +368,13 @@ public final class ManagedIndexSchema extends IndexSchema {
   }
 
 
-  public class FieldExistsException extends SolrException {
+  public static class FieldExistsException extends SolrException {
     public FieldExistsException(ErrorCode code, String msg) {
       super(code, msg);
     }
   }
 
-  public class SchemaChangedInZkException extends SolrException {
+  public static class SchemaChangedInZkException extends SolrException {
     public SchemaChangedInZkException(ErrorCode code, String msg) {
       super(code, msg);
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/java/org/apache/solr/schema/RandomSortField.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/schema/RandomSortField.java b/solr/core/src/java/org/apache/solr/schema/RandomSortField.java
index 0ecb195..44bb420 100644
--- a/solr/core/src/java/org/apache/solr/schema/RandomSortField.java
+++ b/solr/core/src/java/org/apache/solr/schema/RandomSortField.java
@@ -160,7 +160,7 @@ public class RandomSortField extends FieldType {
 
 
 
-  public class RandomValueSource extends ValueSource {
+  public static class RandomValueSource extends ValueSource {
     private final String field;
 
     public RandomValueSource(String field) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/java/org/apache/solr/search/ComplexPhraseQParserPlugin.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/ComplexPhraseQParserPlugin.java b/solr/core/src/java/org/apache/solr/search/ComplexPhraseQParserPlugin.java
index 22702dc..2a00a09 100644
--- a/solr/core/src/java/org/apache/solr/search/ComplexPhraseQParserPlugin.java
+++ b/solr/core/src/java/org/apache/solr/search/ComplexPhraseQParserPlugin.java
@@ -59,9 +59,9 @@ public class ComplexPhraseQParserPlugin extends QParserPlugin {
   /**
    * Modified from {@link org.apache.solr.search.LuceneQParser} and {@link org.apache.solr.search.SurroundQParserPlugin.SurroundQParser}
    */
-  class ComplexPhraseQParser extends QParser {
+  static class ComplexPhraseQParser extends QParser {
 
-    final class SolrQueryParserDelegate extends SolrQueryParser {
+    static final class SolrQueryParserDelegate extends SolrQueryParser {
       private SolrQueryParserDelegate(QParser parser, String defaultField) {
         super(parser, defaultField);
       }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/java/org/apache/solr/search/ExportQParserPlugin.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/ExportQParserPlugin.java b/solr/core/src/java/org/apache/solr/search/ExportQParserPlugin.java
index 38bb74f..6c1714c 100644
--- a/solr/core/src/java/org/apache/solr/search/ExportQParserPlugin.java
+++ b/solr/core/src/java/org/apache/solr/search/ExportQParserPlugin.java
@@ -121,7 +121,7 @@ public class ExportQParserPlugin extends QParserPlugin {
     }
   }
   
-  private class ExportCollector extends TopDocsCollector  {
+  private static class ExportCollector extends TopDocsCollector  {
 
     private FixedBitSet[] sets;
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParser.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParser.java b/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParser.java
index c0aee88..a4f9312 100644
--- a/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParser.java
+++ b/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParser.java
@@ -956,7 +956,7 @@ public class ExtendedDismaxQParser extends QParser {
     
     /** A simple container for storing alias info
      */
-    protected class Alias {
+    protected static class Alias {
       public float tie;
       public Map<String,Float> fields;
     }
@@ -1597,7 +1597,7 @@ public class ExtendedDismaxQParser extends QParser {
   /**
    * Simple container for configuration information used when parsing queries
    */
-  public class ExtendedDismaxConfiguration {
+  public static class ExtendedDismaxConfiguration {
     
     /**
      * The field names specified by 'qf' that (most) clauses will 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/java/org/apache/solr/search/HashQParserPlugin.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/HashQParserPlugin.java b/solr/core/src/java/org/apache/solr/search/HashQParserPlugin.java
index 3e0fc22..dc75289 100644
--- a/solr/core/src/java/org/apache/solr/search/HashQParserPlugin.java
+++ b/solr/core/src/java/org/apache/solr/search/HashQParserPlugin.java
@@ -59,7 +59,7 @@ public class HashQParserPlugin extends QParserPlugin {
     return new HashQParser(query, localParams, params, request);
   }
 
-  private class HashQParser extends QParser {
+  private static class HashQParser extends QParser {
 
     public HashQParser(String query, SolrParams localParams, SolrParams params, SolrQueryRequest request) {
       super(query, localParams, params, request);
@@ -74,7 +74,7 @@ public class HashQParserPlugin extends QParserPlugin {
     }
   }
 
-  private class HashQuery extends ExtendedQueryBase implements PostFilter {
+  private static class HashQuery extends ExtendedQueryBase implements PostFilter {
 
     private String keysParam;
     private int workers;
@@ -135,7 +135,7 @@ public class HashQParserPlugin extends QParserPlugin {
       return searcher.rewrite(constantScoreQuery).createWeight(searcher, false, boost);
     }
 
-    public class BitsFilter extends Filter {
+    public static class BitsFilter extends Filter {
       private FixedBitSet[] bitSets;
       public BitsFilter(FixedBitSet[] bitSets) {
         this.bitSets = bitSets;
@@ -166,7 +166,7 @@ public class HashQParserPlugin extends QParserPlugin {
     }
 
 
-    class SegmentPartitioner implements Runnable {
+    static class SegmentPartitioner implements Runnable {
 
       public LeafReaderContext context;
       private int worker;
@@ -238,7 +238,7 @@ public class HashQParserPlugin extends QParserPlugin {
     }
   }
 
-  private class HashCollector extends DelegatingCollector {
+  private static class HashCollector extends DelegatingCollector {
     private int worker;
     private int workers;
     private HashKey hashKey;
@@ -271,7 +271,7 @@ public class HashQParserPlugin extends QParserPlugin {
     public long hashCode(int doc) throws IOException;
   }
 
-  private class BytesHash implements HashKey {
+  private static class BytesHash implements HashKey {
 
     private SortedDocValues values;
     private String field;
@@ -303,7 +303,7 @@ public class HashQParserPlugin extends QParserPlugin {
     }
   }
 
-  private class NumericHash implements HashKey {
+  private static class NumericHash implements HashKey {
 
     private NumericDocValues values;
     private String field;
@@ -331,7 +331,7 @@ public class HashQParserPlugin extends QParserPlugin {
     }
   }
 
-  private class ZeroHash implements HashKey {
+  private static class ZeroHash implements HashKey {
 
     public long hashCode(int doc) {
       return 0;
@@ -342,7 +342,7 @@ public class HashQParserPlugin extends QParserPlugin {
     }
   }
 
-  private class CompositeHash implements HashKey {
+  private static class CompositeHash implements HashKey {
 
     private HashKey key1;
     private HashKey key2;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/java/org/apache/solr/search/ReRankCollector.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/ReRankCollector.java b/solr/core/src/java/org/apache/solr/search/ReRankCollector.java
index 1ac1eaf..8f6bb76 100644
--- a/solr/core/src/java/org/apache/solr/search/ReRankCollector.java
+++ b/solr/core/src/java/org/apache/solr/search/ReRankCollector.java
@@ -139,7 +139,7 @@ public class ReRankCollector extends TopDocsCollector {
     }
   }
 
-  public class BoostedComp implements Comparator {
+  public static class BoostedComp implements Comparator {
     IntFloatHashMap boostedMap;
 
     public BoostedComp(IntIntHashMap boostedDocs, ScoreDoc[] scoreDocs, float maxScore) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/java/org/apache/solr/search/ReRankQParserPlugin.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/ReRankQParserPlugin.java b/solr/core/src/java/org/apache/solr/search/ReRankQParserPlugin.java
index 3e8bf86..1190cdb 100644
--- a/solr/core/src/java/org/apache/solr/search/ReRankQParserPlugin.java
+++ b/solr/core/src/java/org/apache/solr/search/ReRankQParserPlugin.java
@@ -71,7 +71,7 @@ public class ReRankQParserPlugin extends QParserPlugin {
     }
   }
 
-  private final class ReRankQueryRescorer extends QueryRescorer {
+  private static final class ReRankQueryRescorer extends QueryRescorer {
 
     final double reRankWeight;
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/java/org/apache/solr/search/facet/FacetFieldMerger.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/facet/FacetFieldMerger.java b/solr/core/src/java/org/apache/solr/search/facet/FacetFieldMerger.java
index 9f99919..63e8743 100644
--- a/solr/core/src/java/org/apache/solr/search/facet/FacetFieldMerger.java
+++ b/solr/core/src/java/org/apache/solr/search/facet/FacetFieldMerger.java
@@ -169,7 +169,7 @@ public class FacetFieldMerger extends FacetRequestSortedMerger<FacetField> {
 
 
 
-  private class FacetNumBucketsMerger extends FacetMerger {
+  private static class FacetNumBucketsMerger extends FacetMerger {
     long sumBuckets;
     long shardsMissingSum;
     long shardsTruncatedSum;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/java/org/apache/solr/search/facet/UniqueAgg.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/facet/UniqueAgg.java b/solr/core/src/java/org/apache/solr/search/facet/UniqueAgg.java
index 5e1e97c..4cdfe02 100644
--- a/solr/core/src/java/org/apache/solr/search/facet/UniqueAgg.java
+++ b/solr/core/src/java/org/apache/solr/search/facet/UniqueAgg.java
@@ -187,7 +187,7 @@ public class UniqueAgg extends StrAggValueSource {
   }
 
 
-  class NumericAcc extends SlotAcc {
+  static class NumericAcc extends SlotAcc {
     SchemaField sf;
     LongSet[] sets;
     NumericDocValues values;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/java/org/apache/solr/search/function/CollapseScoreFunction.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/function/CollapseScoreFunction.java b/solr/core/src/java/org/apache/solr/search/function/CollapseScoreFunction.java
index 5378ea0..3932f56 100644
--- a/solr/core/src/java/org/apache/solr/search/function/CollapseScoreFunction.java
+++ b/solr/core/src/java/org/apache/solr/search/function/CollapseScoreFunction.java
@@ -45,7 +45,7 @@ public class CollapseScoreFunction extends ValueSource {
     return new CollapseScoreFunctionValues(context);
   }
 
-  public class CollapseScoreFunctionValues extends FunctionValues {
+  public static class CollapseScoreFunctionValues extends FunctionValues {
 
     private CollapseScore cscore;
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/java/org/apache/solr/search/join/GraphQuery.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/search/join/GraphQuery.java b/solr/core/src/java/org/apache/solr/search/join/GraphQuery.java
index 3f762e3..db41651 100644
--- a/solr/core/src/java/org/apache/solr/search/join/GraphQuery.java
+++ b/solr/core/src/java/org/apache/solr/search/join/GraphQuery.java
@@ -315,7 +315,7 @@ public class GraphQuery extends Query {
     
   }
   
-  private class GraphScorer extends Scorer {
+  private static class GraphScorer extends Scorer {
     
     final DocIdSetIterator iter;
     final float score;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/java/org/apache/solr/spelling/PossibilityIterator.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/spelling/PossibilityIterator.java b/solr/core/src/java/org/apache/solr/spelling/PossibilityIterator.java
index c4a7503..0203f18 100644
--- a/solr/core/src/java/org/apache/solr/spelling/PossibilityIterator.java
+++ b/solr/core/src/java/org/apache/solr/spelling/PossibilityIterator.java
@@ -360,7 +360,7 @@ public class PossibilityIterator implements
     throw new UnsupportedOperationException();
   }
   
-  public class RankedSpellPossibility {
+  public static class RankedSpellPossibility {
     public List<SpellCheckCorrection> corrections;
     public int rank;
     public int index;
@@ -406,7 +406,7 @@ public class PossibilityIterator implements
     }
   }
   
-  private class StartOffsetComparator implements
+  private static class StartOffsetComparator implements
       Comparator<SpellCheckCorrection> {
     @Override
     public int compare(SpellCheckCorrection o1, SpellCheckCorrection o2) {
@@ -414,7 +414,7 @@ public class PossibilityIterator implements
     }
   }
   
-  private class RankComparator implements Comparator<RankedSpellPossibility> {
+  private static class RankComparator implements Comparator<RankedSpellPossibility> {
     // Rank poorer suggestions ahead of better ones for use with a PriorityQueue
     @Override
     public int compare(RankedSpellPossibility r1, RankedSpellPossibility r2) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/java/org/apache/solr/update/UpdateLog.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/update/UpdateLog.java b/solr/core/src/java/org/apache/solr/update/UpdateLog.java
index daa50a9..84a2005 100644
--- a/solr/core/src/java/org/apache/solr/update/UpdateLog.java
+++ b/solr/core/src/java/org/apache/solr/update/UpdateLog.java
@@ -204,7 +204,7 @@ public static final int VERSION_IDX = 1;
     }
   };
 
-  public class DBQ {
+  public static class DBQ {
     public String q;     // the query string
     public long version; // positive version of the DBQ
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/java/org/apache/solr/util/RTimer.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/util/RTimer.java b/solr/core/src/java/org/apache/solr/util/RTimer.java
index 7bf075c..1962460 100644
--- a/solr/core/src/java/org/apache/solr/util/RTimer.java
+++ b/solr/core/src/java/org/apache/solr/util/RTimer.java
@@ -41,7 +41,7 @@ public class RTimer {
     double elapsed();
   }
 
-  private class NanoTimeTimerImpl implements TimerImpl {
+  private static class NanoTimeTimerImpl implements TimerImpl {
     private long start;
     public void start() {
       start = System.nanoTime();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/java/org/apache/solr/util/SimplePostTool.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/util/SimplePostTool.java b/solr/core/src/java/org/apache/solr/util/SimplePostTool.java
index a8ef372..e642089 100644
--- a/solr/core/src/java/org/apache/solr/util/SimplePostTool.java
+++ b/solr/core/src/java/org/apache/solr/util/SimplePostTool.java
@@ -1065,7 +1065,7 @@ public class SimplePostTool {
   /**
    * Inner class to filter files based on glob wildcards
    */
-  class GlobFileFilter implements FileFilter
+  static class GlobFileFilter implements FileFilter
   {
     private String _pattern;
     private Pattern p;
@@ -1258,7 +1258,7 @@ public class SimplePostTool {
   /**
    * Utility class to hold the result form a page fetch
    */
-  public class PageFetcherResult {
+  public static class PageFetcherResult {
     int httpStatus = 200;
     String contentType = "text/html";
     URL redirectUrl = null;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/test/org/apache/solr/cloud/BaseCdcrDistributedZkTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/BaseCdcrDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/BaseCdcrDistributedZkTest.java
index 35cc444..8a88959 100644
--- a/solr/core/src/test/org/apache/solr/cloud/BaseCdcrDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/BaseCdcrDistributedZkTest.java
@@ -816,7 +816,7 @@ public class BaseCdcrDistributedZkTest extends AbstractDistribZkTestBase {
     return info;
   }
 
-  protected class CollectionInfo {
+  protected static class CollectionInfo {
 
     List<CoreInfo> coreInfos = new ArrayList<>();
 
@@ -869,7 +869,7 @@ public class BaseCdcrDistributedZkTest extends AbstractDistribZkTestBase {
       this.coreInfos.add(info);
     }
 
-    public class CoreInfo {
+    public static class CoreInfo {
       String collectionName;
       String shard;
       boolean isLeader;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java
index 628884c..ffc5262 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java
@@ -400,8 +400,8 @@ public class ChaosMonkeyNothingIsSafeTest extends AbstractFullDistribZkTestBase
     SolrInputDocument doc = getDoc(fields);
     indexDoc(doc);
   }
-  
-  class ErrorLoggingConcurrentUpdateSolrClient extends ConcurrentUpdateSolrClient {
+
+  static class ErrorLoggingConcurrentUpdateSolrClient extends ConcurrentUpdateSolrClient {
     public ErrorLoggingConcurrentUpdateSolrClient(String serverUrl, HttpClient httpClient, int queueSize, int threadCount) {
       super(serverUrl, httpClient, queueSize, threadCount, null, false);
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/test/org/apache/solr/cloud/ConnectionManagerTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/ConnectionManagerTest.java b/solr/core/src/test/org/apache/solr/cloud/ConnectionManagerTest.java
index 17d4c54..90d9cc1 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ConnectionManagerTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ConnectionManagerTest.java
@@ -153,7 +153,7 @@ public class ConnectionManagerTest extends SolrTestCaseJ4 {
     }
   }
   
-  private class MockZkClientConnectionStrategy extends DefaultConnectionStrategy {
+  private static class MockZkClientConnectionStrategy extends DefaultConnectionStrategy {
     int called = 0;
     boolean exceptionThrown = false;
     

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/test/org/apache/solr/cloud/DistributedQueueTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/DistributedQueueTest.java b/solr/core/src/test/org/apache/solr/cloud/DistributedQueueTest.java
index d1192a1..b6754c7 100644
--- a/solr/core/src/test/org/apache/solr/cloud/DistributedQueueTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/DistributedQueueTest.java
@@ -202,7 +202,7 @@ public class DistributedQueueTest extends SolrTestCaseJ4 {
     return new DistributedQueue(zkClient, setupNewDistributedQueueZNode(dqZNode));
   }
 
-  private class QueueChangerThread extends Thread {
+  private static class QueueChangerThread extends Thread {
 
     DistributedQueue dq;
     long waitBeforeOfferMs;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/test/org/apache/solr/cloud/LeaderElectionTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/LeaderElectionTest.java b/solr/core/src/test/org/apache/solr/cloud/LeaderElectionTest.java
index 2582872..cab5ee3 100644
--- a/solr/core/src/test/org/apache/solr/cloud/LeaderElectionTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/LeaderElectionTest.java
@@ -83,8 +83,8 @@ public class LeaderElectionTest extends SolrTestCaseJ4 {
     zkClient.makePath("/collections/collection1", true);
     zkClient.makePath("/collections/collection2", true);
   }
-  
-  class TestLeaderElectionContext extends ShardLeaderElectionContextBase {
+
+  static class TestLeaderElectionContext extends ShardLeaderElectionContextBase {
     private long runLeaderDelay = 0;
 
     public TestLeaderElectionContext(LeaderElector leaderElector,

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/test/org/apache/solr/cloud/OverriddenZkACLAndCredentialsProvidersTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverriddenZkACLAndCredentialsProvidersTest.java b/solr/core/src/test/org/apache/solr/cloud/OverriddenZkACLAndCredentialsProvidersTest.java
index 56c0df9..f4cbc77 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverriddenZkACLAndCredentialsProvidersTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverriddenZkACLAndCredentialsProvidersTest.java
@@ -214,7 +214,7 @@ public class OverriddenZkACLAndCredentialsProvidersTest extends SolrTestCaseJ4 {
     }
   }
   
-  private class SolrZkClientFactoryUsingCompletelyNewProviders {
+  private static class SolrZkClientFactoryUsingCompletelyNewProviders {
     
     final String digestUsername;
     final String digestPassword;
@@ -274,7 +274,7 @@ public class OverriddenZkACLAndCredentialsProvidersTest extends SolrTestCaseJ4 {
     
   }
   
-  private class SolrZkClientUsingVMParamsProvidersButWithDifferentVMParamsNames extends SolrZkClient {
+  private static class SolrZkClientUsingVMParamsProvidersButWithDifferentVMParamsNames extends SolrZkClient {
     
     public SolrZkClientUsingVMParamsProvidersButWithDifferentVMParamsNames(String zkServerAddress, int zkClientTimeout) {
       super(zkServerAddress, zkClientTimeout);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
index 635292a..48ac91f 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java
@@ -78,7 +78,7 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
   private Thread thread;
   private Queue<QueueEvent> queue = new ArrayBlockingQueue<>(10);
 
-  private class OverseerCollectionConfigSetProcessorToBeTested extends
+  private static class OverseerCollectionConfigSetProcessorToBeTested extends
       OverseerCollectionConfigSetProcessor {
     
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/test/org/apache/solr/cloud/ZkSolrClientTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/ZkSolrClientTest.java b/solr/core/src/test/org/apache/solr/cloud/ZkSolrClientTest.java
index faa2ba7..b3a3d7e 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ZkSolrClientTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ZkSolrClientTest.java
@@ -40,7 +40,7 @@ public class ZkSolrClientTest extends AbstractSolrTestCase {
     initCore("solrconfig.xml", "schema.xml");
   }
 
-  class ZkConnection implements AutoCloseable {
+  static class ZkConnection implements AutoCloseable {
 
     private ZkTestServer server = null;
     private SolrZkClient zkClient = null;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/test/org/apache/solr/core/CachingDirectoryFactoryTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/core/CachingDirectoryFactoryTest.java b/solr/core/src/test/org/apache/solr/core/CachingDirectoryFactoryTest.java
index 7a7fb9c..8714054 100644
--- a/solr/core/src/test/org/apache/solr/core/CachingDirectoryFactoryTest.java
+++ b/solr/core/src/test/org/apache/solr/core/CachingDirectoryFactoryTest.java
@@ -41,7 +41,7 @@ public class CachingDirectoryFactoryTest extends SolrTestCaseJ4 {
   private Map<String,Tracker> dirs = new HashMap<>();
   private volatile boolean stop = false;
   
-  private class Tracker {
+  private static class Tracker {
     String path;
     AtomicInteger refCnt = new AtomicInteger(0);
     Directory dir;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/test/org/apache/solr/handler/AnalysisRequestHandlerTestBase.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/handler/AnalysisRequestHandlerTestBase.java b/solr/core/src/test/org/apache/solr/handler/AnalysisRequestHandlerTestBase.java
index d5f8492..2b52720 100644
--- a/solr/core/src/test/org/apache/solr/handler/AnalysisRequestHandlerTestBase.java
+++ b/solr/core/src/test/org/apache/solr/handler/AnalysisRequestHandlerTestBase.java
@@ -49,7 +49,7 @@ public abstract class AnalysisRequestHandlerTestBase extends SolrTestCaseJ4 {
 
   //================================================= Inner Classes ==================================================
 
-  protected class TokenInfo {
+  protected static class TokenInfo {
 
     private String text;
     private String rawText;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/test/org/apache/solr/handler/PingRequestHandlerTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/handler/PingRequestHandlerTest.java b/solr/core/src/test/org/apache/solr/handler/PingRequestHandlerTest.java
index 96a01c3..b9ab2c1 100644
--- a/solr/core/src/test/org/apache/solr/handler/PingRequestHandlerTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/PingRequestHandlerTest.java
@@ -231,7 +231,7 @@ public class PingRequestHandlerTest extends SolrTestCaseJ4 {
     return rsp;
   }
 
-  class SolrPingWithDistrib extends SolrPing {
+  static class SolrPingWithDistrib extends SolrPing {
     public SolrPing setDistrib(boolean distrib) {   
       getParams().add("distrib", distrib ? "true" : "false");
       return this;    

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/test/org/apache/solr/handler/XmlUpdateRequestHandlerTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/handler/XmlUpdateRequestHandlerTest.java b/solr/core/src/test/org/apache/solr/handler/XmlUpdateRequestHandlerTest.java
index 6d46722..961bb25 100644
--- a/solr/core/src/test/org/apache/solr/handler/XmlUpdateRequestHandlerTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/XmlUpdateRequestHandlerTest.java
@@ -194,7 +194,7 @@ public class XmlUpdateRequestHandlerTest extends SolrTestCaseJ4 {
       p.assertNoCommandsPending();
     }
 
-    private class MockUpdateRequestProcessor extends UpdateRequestProcessor {
+    private static class MockUpdateRequestProcessor extends UpdateRequestProcessor {
 
       private Queue<DeleteUpdateCommand> deleteCommands = new LinkedList<>();
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/test/org/apache/solr/handler/admin/CoreMergeIndexesAdminHandlerTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/handler/admin/CoreMergeIndexesAdminHandlerTest.java b/solr/core/src/test/org/apache/solr/handler/admin/CoreMergeIndexesAdminHandlerTest.java
index d026ecd..890ef4a 100644
--- a/solr/core/src/test/org/apache/solr/handler/admin/CoreMergeIndexesAdminHandlerTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/admin/CoreMergeIndexesAdminHandlerTest.java
@@ -50,7 +50,7 @@ public class CoreMergeIndexesAdminHandlerTest extends SolrTestCaseJ4 {
 
   private static String FAILING_MSG = "Creating a directory using FailingDirectoryFactoryException always fails";
   public static class FailingDirectoryFactory extends MockFSDirectoryFactory {
-    public class FailingDirectoryFactoryException extends RuntimeException {
+    public static class FailingDirectoryFactoryException extends RuntimeException {
       public FailingDirectoryFactoryException() {
         super(FAILING_MSG);
       }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/test/org/apache/solr/internal/csv/CSVParserTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/internal/csv/CSVParserTest.java b/solr/core/src/test/org/apache/solr/internal/csv/CSVParserTest.java
index 0688c52..ba604f8 100644
--- a/solr/core/src/test/org/apache/solr/internal/csv/CSVParserTest.java
+++ b/solr/core/src/test/org/apache/solr/internal/csv/CSVParserTest.java
@@ -37,7 +37,7 @@ public class CSVParserTest extends TestCase {
   /**
    * TestCSVParser.
    */
-  class TestCSVParser extends CSVParser {
+  static class TestCSVParser extends CSVParser {
     /**
      * Test parser to investigate the type of the internal Token.
      * @param in a Reader

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/test/org/apache/solr/metrics/reporters/SolrGraphiteReporterTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/metrics/reporters/SolrGraphiteReporterTest.java b/solr/core/src/test/org/apache/solr/metrics/reporters/SolrGraphiteReporterTest.java
index e58c9dd..f45b193 100644
--- a/solr/core/src/test/org/apache/solr/metrics/reporters/SolrGraphiteReporterTest.java
+++ b/solr/core/src/test/org/apache/solr/metrics/reporters/SolrGraphiteReporterTest.java
@@ -78,7 +78,7 @@ public class SolrGraphiteReporterTest extends SolrTestCaseJ4 {
     }
   }
 
-  private class MockGraphite extends Thread {
+  private static class MockGraphite extends Thread {
     private List<String> lines = new ArrayList<>();
     private ServerSocket server = null;
     private int port;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/test/org/apache/solr/response/TestGraphMLResponseWriter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/response/TestGraphMLResponseWriter.java b/solr/core/src/test/org/apache/solr/response/TestGraphMLResponseWriter.java
index e974b42..72af04b 100644
--- a/solr/core/src/test/org/apache/solr/response/TestGraphMLResponseWriter.java
+++ b/solr/core/src/test/org/apache/solr/response/TestGraphMLResponseWriter.java
@@ -84,7 +84,7 @@ public class TestGraphMLResponseWriter extends SolrTestCaseJ4 {
 
   }
 
-  private class TestStream extends TupleStream {
+  private static class TestStream extends TupleStream {
 
     private Iterator<Tuple> tuples;
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/test/org/apache/solr/rest/TestManagedResource.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/rest/TestManagedResource.java b/solr/core/src/test/org/apache/solr/rest/TestManagedResource.java
index cce2109..c06abf0 100644
--- a/solr/core/src/test/org/apache/solr/rest/TestManagedResource.java
+++ b/solr/core/src/test/org/apache/solr/rest/TestManagedResource.java
@@ -49,7 +49,7 @@ public class TestManagedResource extends SolrTestCaseJ4 {
    * Mock class that acts like an analysis component that depends on
    * data managed by a ManagedResource
    */
-  private class MockAnalysisComponent implements ManagedResourceObserver {
+  private static class MockAnalysisComponent implements ManagedResourceObserver {
     
     private boolean wasNotified = false;
 
@@ -121,7 +121,7 @@ public class TestManagedResource extends SolrTestCaseJ4 {
   /**
    * Implements a Java serialization based storage format.
    */
-  private class SerializableStorage extends ManagedResourceStorage {
+  private static class SerializableStorage extends ManagedResourceStorage {
     
     SerializableStorage(StorageIO storageIO, SolrResourceLoader loader) {
       super(storageIO, loader);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/test/org/apache/solr/rest/TestRestManager.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/rest/TestRestManager.java b/solr/core/src/test/org/apache/solr/rest/TestRestManager.java
index ab2c45b..cc6c58a 100644
--- a/solr/core/src/test/org/apache/solr/rest/TestRestManager.java
+++ b/solr/core/src/test/org/apache/solr/rest/TestRestManager.java
@@ -62,7 +62,7 @@ public class TestRestManager extends SolrRestletTestBase {
     
   }
   
-  private class MockAnalysisComponent implements ManagedResourceObserver {
+  private static class MockAnalysisComponent implements ManagedResourceObserver {
 
     @Override
     public void onManagedResourceInitialized(NamedList<?> args, ManagedResource res)

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/test/org/apache/solr/schema/TestCloudManagedSchemaConcurrent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/schema/TestCloudManagedSchemaConcurrent.java b/solr/core/src/test/org/apache/solr/schema/TestCloudManagedSchemaConcurrent.java
index e724da9..e6e4da1 100644
--- a/solr/core/src/test/org/apache/solr/schema/TestCloudManagedSchemaConcurrent.java
+++ b/solr/core/src/test/org/apache/solr/schema/TestCloudManagedSchemaConcurrent.java
@@ -248,7 +248,7 @@ public class TestCloudManagedSchemaConcurrent extends AbstractFullDistribZkTestB
     schemaLockTest();
   }
   
-  private class Info {
+  private static class Info {
     int numAddFieldPuts = 0;
     int numAddFieldPosts = 0;
     int numAddDynamicFieldPuts = 0;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/test/org/apache/solr/search/AnalyticsTestQParserPlugin.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/AnalyticsTestQParserPlugin.java b/solr/core/src/test/org/apache/solr/search/AnalyticsTestQParserPlugin.java
index 392fb82..158df17 100644
--- a/solr/core/src/test/org/apache/solr/search/AnalyticsTestQParserPlugin.java
+++ b/solr/core/src/test/org/apache/solr/search/AnalyticsTestQParserPlugin.java
@@ -42,7 +42,7 @@ public class AnalyticsTestQParserPlugin extends QParserPlugin {
     return new TestAnalyticsQueryParser(query, localParams, params, req);
   }
 
-  class TestAnalyticsQueryParser extends QParser {
+  static class TestAnalyticsQueryParser extends QParser {
 
     public TestAnalyticsQueryParser(String query, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
       super(query, localParams, params, req);
@@ -58,7 +58,7 @@ public class AnalyticsTestQParserPlugin extends QParserPlugin {
     }
   }
 
-  class TestAnalyticsQuery extends AnalyticsQuery {
+  static class TestAnalyticsQuery extends AnalyticsQuery {
 
     private int base;
 
@@ -72,7 +72,7 @@ public class AnalyticsTestQParserPlugin extends QParserPlugin {
     }
   }
 
-  class TestAnalyticsCollector extends DelegatingCollector {
+  static class TestAnalyticsCollector extends DelegatingCollector {
     ResponseBuilder rb;
     int count;
     int base;
@@ -97,7 +97,7 @@ public class AnalyticsTestQParserPlugin extends QParserPlugin {
     }
   }
 
-  class TestAnalyticsMergeStrategy implements MergeStrategy {
+  static class TestAnalyticsMergeStrategy implements MergeStrategy {
 
     public boolean mergesIds() {
       return false;
@@ -130,7 +130,7 @@ public class AnalyticsTestQParserPlugin extends QParserPlugin {
     }
   }
 
-  class TestIterative extends IterativeMergeStrategy  {
+  static class TestIterative extends IterativeMergeStrategy  {
 
     public void process(ResponseBuilder rb, ShardRequest sreq) throws Exception {
       int count = 0;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/test/org/apache/solr/search/TestExtendedDismaxParser.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestExtendedDismaxParser.java b/solr/core/src/test/org/apache/solr/search/TestExtendedDismaxParser.java
index 27bf40f..a887fed 100644
--- a/solr/core/src/test/org/apache/solr/search/TestExtendedDismaxParser.java
+++ b/solr/core/src/test/org/apache/solr/search/TestExtendedDismaxParser.java
@@ -1830,7 +1830,7 @@ public class TestExtendedDismaxParser extends SolrTestCaseJ4 {
     return false;
   }
 
-  class MultilanguageQueryParser extends ExtendedDismaxQParser {
+  static class MultilanguageQueryParser extends ExtendedDismaxQParser {
 
     public MultilanguageQueryParser(String qstr, SolrParams localParams,
         SolrParams params, SolrQueryRequest req) {
@@ -1857,10 +1857,10 @@ public class TestExtendedDismaxParser extends SolrTestCaseJ4 {
     }
     
   }
-  
-  
-  
-  class FuzzyDismaxQParser extends ExtendedDismaxQParser {
+
+
+
+  static class FuzzyDismaxQParser extends ExtendedDismaxQParser {
 
     public FuzzyDismaxQParser(String qstr, SolrParams localParams,
         SolrParams params, SolrQueryRequest req) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/test/org/apache/solr/search/TestFiltering.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestFiltering.java b/solr/core/src/test/org/apache/solr/search/TestFiltering.java
index 9f9a51a..c718283 100644
--- a/solr/core/src/test/org/apache/solr/search/TestFiltering.java
+++ b/solr/core/src/test/org/apache/solr/search/TestFiltering.java
@@ -174,7 +174,7 @@ public class TestFiltering extends SolrTestCaseJ4 {
   }
 
 
-  class Model {
+  static class Model {
     int indexSize;
     FixedBitSet answer;
     FixedBitSet multiSelect;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/test/org/apache/solr/search/TestRankQueryPlugin.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/search/TestRankQueryPlugin.java b/solr/core/src/test/org/apache/solr/search/TestRankQueryPlugin.java
index b42861a..a526621 100644
--- a/solr/core/src/test/org/apache/solr/search/TestRankQueryPlugin.java
+++ b/solr/core/src/test/org/apache/solr/search/TestRankQueryPlugin.java
@@ -75,7 +75,7 @@ public class TestRankQueryPlugin extends QParserPlugin {
     return new TestRankQueryParser(query, localParams, params, req);
   }
 
-  class TestRankQueryParser extends QParser {
+  static class TestRankQueryParser extends QParser {
 
     public TestRankQueryParser(String query, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
       super(query, localParams, params, req);
@@ -89,7 +89,7 @@ public class TestRankQueryPlugin extends QParserPlugin {
     }
   }
 
-  class TestRankQuery extends RankQuery {
+  static class TestRankQuery extends RankQuery {
 
     private int mergeStrategy;
     private int collector;
@@ -143,7 +143,7 @@ public class TestRankQueryPlugin extends QParserPlugin {
     }
   }
 
-  class TestMergeStrategy implements MergeStrategy {
+  static class TestMergeStrategy implements MergeStrategy {
 
     public int getCost() {
       return 1;
@@ -314,7 +314,7 @@ public class TestRankQueryPlugin extends QParserPlugin {
     }
   }
 
-  class TestMergeStrategy1 implements MergeStrategy {
+  static class TestMergeStrategy1 implements MergeStrategy {
 
     public int getCost() {
       return 1;
@@ -435,7 +435,7 @@ public class TestRankQueryPlugin extends QParserPlugin {
       }
     }
 
-    private class FakeScorer extends Scorer {
+    private static class FakeScorer extends Scorer {
 
       final int docid;
       final float score;
@@ -674,7 +674,7 @@ public class TestRankQueryPlugin extends QParserPlugin {
   }
 
 
-  class TestCollector extends TopDocsCollector {
+  static class TestCollector extends TopDocsCollector {
 
     private List<ScoreDoc> list = new ArrayList();
 
@@ -743,7 +743,7 @@ public class TestRankQueryPlugin extends QParserPlugin {
     }
   }
 
-  class TestCollector1 extends TopDocsCollector {
+  static class TestCollector1 extends TopDocsCollector {
 
     private List<ScoreDoc> list = new ArrayList();
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/test/org/apache/solr/spelling/ConjunctionSolrSpellCheckerTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/spelling/ConjunctionSolrSpellCheckerTest.java b/solr/core/src/test/org/apache/solr/spelling/ConjunctionSolrSpellCheckerTest.java
index 2a7daa9..31f20fb 100644
--- a/solr/core/src/test/org/apache/solr/spelling/ConjunctionSolrSpellCheckerTest.java
+++ b/solr/core/src/test/org/apache/solr/spelling/ConjunctionSolrSpellCheckerTest.java
@@ -44,8 +44,8 @@ public class ConjunctionSolrSpellCheckerTest extends LuceneTestCase {
       // correct behavior
     }
   }
-  
-  class MockSolrSpellChecker extends SolrSpellChecker {
+
+  static class MockSolrSpellChecker extends SolrSpellChecker {
     
     final StringDistance sd;
     

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/test/org/apache/solr/spelling/IndexBasedSpellCheckerTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/spelling/IndexBasedSpellCheckerTest.java b/solr/core/src/test/org/apache/solr/spelling/IndexBasedSpellCheckerTest.java
index 75de4db..b221044 100644
--- a/solr/core/src/test/org/apache/solr/spelling/IndexBasedSpellCheckerTest.java
+++ b/solr/core/src/test/org/apache/solr/spelling/IndexBasedSpellCheckerTest.java
@@ -230,7 +230,7 @@ public class IndexBasedSpellCheckerTest extends SolrTestCaseJ4 {
     }
   }
 
-  private class TestSpellChecker extends IndexBasedSpellChecker{
+  private static class TestSpellChecker extends IndexBasedSpellChecker{
     @Override
     public SpellChecker getSpellChecker(){
       return spellChecker;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/test/org/apache/solr/store/blockcache/BlockDirectoryTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/store/blockcache/BlockDirectoryTest.java b/solr/core/src/test/org/apache/solr/store/blockcache/BlockDirectoryTest.java
index 4272c24..99f7ce6 100644
--- a/solr/core/src/test/org/apache/solr/store/blockcache/BlockDirectoryTest.java
+++ b/solr/core/src/test/org/apache/solr/store/blockcache/BlockDirectoryTest.java
@@ -37,7 +37,7 @@ import org.junit.Test;
 
 public class BlockDirectoryTest extends SolrTestCaseJ4 {
 
-  private class MapperCache implements Cache {
+  private static class MapperCache implements Cache {
     public Map<String, byte[]> map = Caffeine.newBuilder()
         .maximumSize(8)
         .<String, byte[]>build()

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/test/org/apache/solr/util/SimplePostToolTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/util/SimplePostToolTest.java b/solr/core/src/test/org/apache/solr/util/SimplePostToolTest.java
index 7f70024..c9f8fc5 100644
--- a/solr/core/src/test/org/apache/solr/util/SimplePostToolTest.java
+++ b/solr/core/src/test/org/apache/solr/util/SimplePostToolTest.java
@@ -225,7 +225,7 @@ public class SimplePostToolTest extends SolrTestCaseJ4 {
     
     @Override
     public PageFetcherResult readPageFromUrl(URL u) {
-      PageFetcherResult res = (new SimplePostTool()).new PageFetcherResult();
+      PageFetcherResult res = new PageFetcherResult();
       if (isDisallowedByRobots(u)) {
         res.httpStatus = 403;
         return res;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/test/org/apache/solr/util/SolrPluginUtilsTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/util/SolrPluginUtilsTest.java b/solr/core/src/test/org/apache/solr/util/SolrPluginUtilsTest.java
index e41484f..af85ad6 100644
--- a/solr/core/src/test/org/apache/solr/util/SolrPluginUtilsTest.java
+++ b/solr/core/src/test/org/apache/solr/util/SolrPluginUtilsTest.java
@@ -400,7 +400,7 @@ public class SolrPluginUtilsTest extends SolrTestCaseJ4 {
     assertEquals(3, q.build().getMinimumNumberShouldMatch());
   }
 
-  private class InvokeSettersTestClass {
+  private static class InvokeSettersTestClass {
     private float aFloat = random().nextFloat();
     public float getAFloat() {
       return aFloat;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/core/src/test/org/apache/solr/util/TestSolrCLIRunExample.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/util/TestSolrCLIRunExample.java b/solr/core/src/test/org/apache/solr/util/TestSolrCLIRunExample.java
index 28a58ff..7980560 100644
--- a/solr/core/src/test/org/apache/solr/util/TestSolrCLIRunExample.java
+++ b/solr/core/src/test/org/apache/solr/util/TestSolrCLIRunExample.java
@@ -72,7 +72,7 @@ public class TestSolrCLIRunExample extends SolrTestCaseJ4 {
    * Overrides the call to exec bin/solr to start Solr nodes to start them using the Solr test-framework
    * instead of the script, since the script depends on a full build.
    */
-  private class RunExampleExecutor extends DefaultExecutor implements Closeable {
+  private static class RunExampleExecutor extends DefaultExecutor implements Closeable {
 
     private PrintStream stdout;
     private List<org.apache.commons.exec.CommandLine> commandsExecuted = new ArrayList<>();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClient.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClient.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClient.java
index 4eac2a5..fa93503 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClient.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClient.java
@@ -460,7 +460,7 @@ public class ConcurrentUpdateSolrClient extends SolrClient {
   /**
    * Class representing an UpdateRequest and an optional collection.
    */
-  class Update {
+  static class Update {
     UpdateRequest request;
     String collection;
     /**

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/solrj/src/java/org/apache/solr/client/solrj/io/ModelCache.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/ModelCache.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/ModelCache.java
index 4676594..1d7e46f 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/ModelCache.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/ModelCache.java
@@ -110,7 +110,7 @@ public class ModelCache {
     }
   }
 
-  private class Model {
+  private static class Model {
     private Tuple tuple;
     private long lastChecked;
 
@@ -128,7 +128,7 @@ public class ModelCache {
     }
   }
 
-  private class LRU extends LinkedHashMap<String, Model> {
+  private static class LRU extends LinkedHashMap<String, Model> {
 
     private int maxSize;
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/solrj/src/java/org/apache/solr/client/solrj/io/graph/GatherNodesStream.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/graph/GatherNodesStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/graph/GatherNodesStream.java
index 8059677..bbd7669 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/graph/GatherNodesStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/graph/GatherNodesStream.java
@@ -624,7 +624,7 @@ public class GatherNodesStream extends TupleStream implements Expressible {
     return null;
   }
 
-  class NodeStream extends TupleStream {
+  static class NodeStream extends TupleStream {
 
     private List<String> ids;
     private Iterator<String> it;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/solrj/src/java/org/apache/solr/client/solrj/io/graph/ShortestPathStream.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/graph/ShortestPathStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/graph/ShortestPathStream.java
index aa546ae..5075330 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/graph/ShortestPathStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/graph/ShortestPathStream.java
@@ -492,7 +492,7 @@ public class ShortestPathStream extends TupleStream implements Expressible {
     }
   }
 
-  private class Edge {
+  private static class Edge {
 
     private String from;
     private String to;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/solrj/src/java/org/apache/solr/client/solrj/io/ops/GroupOperation.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/ops/GroupOperation.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/ops/GroupOperation.java
index 4a28cc1..a2bd8c9 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/ops/GroupOperation.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/ops/GroupOperation.java
@@ -133,7 +133,7 @@ public class GroupOperation implements ReduceOperation {
     }
   }
 
-  class ReverseComp implements Comparator<Tuple>, Serializable {
+  static class ReverseComp implements Comparator<Tuple>, Serializable {
     private StreamComparator comp;
 
     public ReverseComp(StreamComparator comp) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/RankStream.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/RankStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/RankStream.java
index a9fb1af..e301081 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/RankStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/RankStream.java
@@ -213,7 +213,7 @@ public class RankStream extends TupleStream implements Expressible {
     return 0;
   }
 
-  class ReverseComp implements Comparator<Tuple>, Serializable {
+  static class ReverseComp implements Comparator<Tuple>, Serializable {
 
     private static final long serialVersionUID = 1L;
     private StreamComparator comp;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/SignificantTermsStream.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/SignificantTermsStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/SignificantTermsStream.java
index b4decd5..45cd272 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/SignificantTermsStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/SignificantTermsStream.java
@@ -339,7 +339,7 @@ public class SignificantTermsStream extends TupleStream implements Expressible{
     }
   }
 
-  private class ScoreComp implements Comparator<Map> {
+  private static class ScoreComp implements Comparator<Map> {
     public int compare(Map a, Map b) {
       Float scorea = (Float)a.get("score");
       Float scoreb = (Float)b.get("score");

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TextLogitStream.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TextLogitStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TextLogitStream.java
index 5a70ced..a6ba9d9 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TextLogitStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/TextLogitStream.java
@@ -532,7 +532,7 @@ public class TextLogitStream extends TupleStream implements Expressible {
     return buf.toString();
   }
 
-  protected class TermsStream extends TupleStream {
+  protected static class TermsStream extends TupleStream {
 
     private List<String> terms;
     private Iterator<String> it;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/solrj/src/java/org/apache/solr/client/solrj/response/SpellCheckResponse.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/response/SpellCheckResponse.java b/solr/solrj/src/java/org/apache/solr/client/solrj/response/SpellCheckResponse.java
index 946225d..89ef351 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/response/SpellCheckResponse.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/response/SpellCheckResponse.java
@@ -211,7 +211,7 @@ public class SpellCheckResponse {
 
   }
 
-  public class Collation {
+  public static class Collation {
     private String collationQueryString;
     private List<Correction> misspellingsAndCorrections = new ArrayList<>();
     private long numberOfHits;
@@ -244,7 +244,7 @@ public class SpellCheckResponse {
 
   }
 
-  public class Correction {
+  public static class Correction {
     private String original;
     private String correction;
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
index 80531cb..7cefbee 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java
@@ -145,7 +145,7 @@ public class ZkStateReader implements Closeable {
 
   private final ExecutorService notifications = ExecutorUtil.newMDCAwareCachedThreadPool("watches");
 
-  private class CollectionWatch {
+  private static class CollectionWatch {
 
     int coreRefCount = 0;
     Set<CollectionStateWatcher> stateWatchers = ConcurrentHashMap.newKeySet();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/solrj/src/test/org/apache/solr/client/solrj/TestLBHttpSolrClient.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/TestLBHttpSolrClient.java b/solr/solrj/src/test/org/apache/solr/client/solrj/TestLBHttpSolrClient.java
index 964ea26..3c1c45f 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/TestLBHttpSolrClient.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/TestLBHttpSolrClient.java
@@ -247,7 +247,7 @@ public class TestLBHttpSolrClient extends SolrTestCaseJ4 {
     }
   }
   
-  private class SolrInstance {
+  private static class SolrInstance {
     String name;
     File homeDir;
     File dataDir;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/SolrExampleStreamingTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/SolrExampleStreamingTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/SolrExampleStreamingTest.java
index 093238d..02ed7be 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/SolrExampleStreamingTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/SolrExampleStreamingTest.java
@@ -104,8 +104,8 @@ public class SolrExampleStreamingTest extends SolrExampleTests {
                    null, failures.get(0));
     }
   }
-  
-  class FailureRecordingConcurrentUpdateSolrClient extends ConcurrentUpdateSolrClient {
+
+  static class FailureRecordingConcurrentUpdateSolrClient extends ConcurrentUpdateSolrClient {
     private final List<Throwable> failures = new ArrayList<>();
     
     public FailureRecordingConcurrentUpdateSolrClient(String serverUrl, int queueSize, int numThreads) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/solrj/src/test/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClientTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClientTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClientTest.java
index de728bd..24f08d2 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClientTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrClientTest.java
@@ -252,7 +252,7 @@ public class ConcurrentUpdateSolrClientTest extends SolrJettyTestBase {
 
   }
 
-  class SendDocsRunnable implements Runnable {
+  static class SendDocsRunnable implements Runnable {
     
     private String id;
     private int numDocs;
@@ -289,8 +289,8 @@ public class ConcurrentUpdateSolrClientTest extends SolrJettyTestBase {
       }      
     }    
   }
-  
-  class OutcomeCountingConcurrentUpdateSolrClient extends ConcurrentUpdateSolrClient {
+
+  static class OutcomeCountingConcurrentUpdateSolrClient extends ConcurrentUpdateSolrClient {
     private final AtomicInteger successCounter;
     private final AtomicInteger failureCounter;
     private final StringBuilder errors;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/53064e46/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
----------------------------------------------------------------------
diff --git a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
index 04fc3ff..f82ccc6 100644
--- a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
+++ b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java
@@ -1627,7 +1627,7 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase {
     }
   }
 
-  protected class FldType {
+  protected static class FldType {
     public String fname;
     public IVals numValues;
     public Vals vals;


[04/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-10344: Update Solr default/example and test configs to use WordDelimiterGraphFilterFactory

Posted by ab...@apache.org.
SOLR-10344: Update Solr default/example and test configs to use WordDelimiterGraphFilterFactory


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/b46b8278
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/b46b8278
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/b46b8278

Branch: refs/heads/jira/solr-9959
Commit: b46b8278c60102eeaeb45dddf239356746da7de8
Parents: cb20eae
Author: Steve Rowe <sa...@gmail.com>
Authored: Mon Mar 27 23:53:55 2017 -0400
Committer: Steve Rowe <sa...@gmail.com>
Committed: Mon Mar 27 23:53:55 2017 -0400

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  2 +
 .../clustering/solr/collection1/conf/schema.xml | 23 +++++--
 .../solr/collection1/conf/synonyms.txt          |  2 +-
 .../conf/dataimport-schema-no-unique-key.xml    |  7 +-
 .../solr/collection1/conf/dataimport-schema.xml | 17 +++--
 .../conf/dataimport-solr_id-schema.xml          | 23 +++++--
 .../extraction/solr/collection1/conf/schema.xml | 40 ++++++++----
 .../solr/collection1/conf/synonyms.txt          |  2 +-
 .../uima/solr/collection1/conf/schema.xml       | 37 +++++++----
 .../uima/solr/collection1/conf/synonyms.txt     |  2 +-
 .../test-files/uima/uima-tokenizers-schema.xml  | 34 +++++++---
 .../conf/schema-HighlighterMaxOffsetTest.xml    |  5 +-
 .../collection1/conf/schema-copyfield-test.xml  | 29 ++++++---
 .../collection1/conf/schema-docValuesJoin.xml   |  5 +-
 .../solr/collection1/conf/schema-folding.xml    |  2 +-
 .../solr/collection1/conf/schema-hash.xml       | 59 ++++++++++++-----
 .../collection1/conf/schema-psuedo-fields.xml   |  5 +-
 .../collection1/conf/schema-required-fields.xml | 18 ++++--
 .../solr/collection1/conf/schema-rest.xml       | 63 ++++++++++++------
 .../conf/schema-single-dynamic-copy-field.xml   | 63 ++++++++++++------
 .../solr/collection1/conf/schema-sql.xml        | 59 ++++++++++++-----
 .../solr/collection1/conf/schema-trie.xml       | 23 +++++--
 .../test-files/solr/collection1/conf/schema.xml | 58 ++++++++++++-----
 .../solr/collection1/conf/schema11.xml          | 27 ++++++--
 .../solr/collection1/conf/schema12.xml          | 63 ++++++++++++------
 .../solr/collection1/conf/schema15.xml          | 63 ++++++++++++------
 .../solr/collection1/conf/schema_latest.xml     | 27 ++++++--
 .../solr/collection1/conf/schemasurround.xml    | 68 +++++++++++++-------
 .../solr/collection1/conf/synonyms.txt          |  2 +-
 .../solr/collection1/conf/wdftypes.txt          |  2 +-
 .../configsets/cloud-dynamic/conf/schema.xml    | 35 +++++++---
 .../solr/configsets/doc-expiry/conf/schema.xml  | 35 +++++++---
 .../org/apache/solr/ConvertedLegacyTest.java    |  2 +-
 .../FieldAnalysisRequestHandlerTest.java        | 14 ++--
 .../solr/rest/schema/TestBulkSchemaAPI.java     | 29 ++++++++-
 .../example-DIH/solr/db/conf/managed-schema     | 26 ++++++--
 .../example-DIH/solr/db/conf/synonyms.txt       |  2 +-
 .../example-DIH/solr/mail/conf/managed-schema   | 26 ++++++--
 .../example-DIH/solr/mail/conf/synonyms.txt     |  2 +-
 .../example-DIH/solr/rss/conf/managed-schema    | 31 ++++++---
 .../example-DIH/solr/rss/conf/synonyms.txt      |  2 +-
 .../example-DIH/solr/solr/conf/managed-schema   | 26 ++++++--
 .../example-DIH/solr/solr/conf/synonyms.txt     |  2 +-
 .../example-DIH/solr/tika/conf/managed-schema   | 23 +++++--
 solr/example/files/conf/managed-schema          | 20 ++++--
 solr/example/files/conf/synonyms.txt            |  2 +-
 .../basic_configs/conf/managed-schema           | 26 ++++++--
 .../configsets/basic_configs/conf/synonyms.txt  |  2 +-
 .../conf/managed-schema                         | 26 ++++++--
 .../conf/synonyms.txt                           |  2 +-
 .../conf/managed-schema                         | 26 ++++++--
 .../conf/synonyms.txt                           |  2 +-
 .../solrj/solr/collection1/conf/schema-sql.xml  | 59 ++++++++++++-----
 .../solrj/solr/collection1/conf/schema.xml      | 59 ++++++++++++-----
 .../solr/configsets/streaming/conf/schema.xml   | 56 +++++++++++-----
 .../solr/client/solrj/request/SchemaTest.java   |  2 +-
 56 files changed, 984 insertions(+), 383 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index b380e6b..3403c90 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -151,6 +151,8 @@ Other Changes
 
 * SOLR-10352: bin/solr script now prints warning when available system entropy is lower than 300 (Esther Quansah via
   Ishan Chattopadhyaya)
+  
+* SOLR-10344: Update Solr default/example and test configs to use WordDelimiterGraphFilterFactory. (Steve Rowe)
 
 ==================  6.5.0 ==================
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/schema.xml
----------------------------------------------------------------------
diff --git a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/schema.xml b/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/schema.xml
index 0c06a48..02626a0 100644
--- a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/schema.xml
+++ b/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/schema.xml
@@ -155,7 +155,7 @@
     </analyzer>
   </fieldType>
 
-  <!-- A text field that uses WordDelimiterFilter to enable splitting and matching of
+  <!-- A text field that uses WordDelimiterGraphFilter to enable splitting and matching of
       words on case-change, alpha numeric boundaries, and non-alphanumeric chars,
       so that a query of "wifi" or "wi fi" could match a document containing "Wi-Fi".
       Synonyms and stopwords are customized by external files, and stemming is enabled.
@@ -174,18 +174,19 @@
               ignoreCase="true"
               words="stopwords.txt"
       />
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
       <filter class="solr.PorterStemFilterFactory"/>
       <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory" />
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <!--<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>-->
       <!--<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
@@ -198,11 +199,23 @@
   <!-- Less flexible matching, but less false matches.  Probably not ideal for product names,
        but may be good for SKUs.  Can insert dashes in the wrong place and still match. -->
   <fieldType name="textTight" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <!--<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+      <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+              catenateNumbers="1" catenateAll="0"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
+      <filter class="solr.EnglishMinimalStemFilterFactory"/>
+      <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory" />
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <!--<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
       <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/synonyms.txt
----------------------------------------------------------------------
diff --git a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/synonyms.txt b/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/synonyms.txt
index b0e31cb..26d237a 100644
--- a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/synonyms.txt
+++ b/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/synonyms.txt
@@ -23,7 +23,7 @@ fooaaa,baraaa,bazaaa
 GB,gib,gigabyte,gigabytes
 MB,mib,megabyte,megabytes
 Television, Televisions, TV, TVs
-#notice we use "gib" instead of "GiB" so any WordDelimiterFilter coming
+#notice we use "gib" instead of "GiB" so any WordDelimiterGraphFilter coming
 #after us won't split it into two words.
 
 # Synonym mappings can be used for spelling correction too

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/contrib/dataimporthandler-extras/src/test-files/dihextras/solr/collection1/conf/dataimport-schema-no-unique-key.xml
----------------------------------------------------------------------
diff --git a/solr/contrib/dataimporthandler-extras/src/test-files/dihextras/solr/collection1/conf/dataimport-schema-no-unique-key.xml b/solr/contrib/dataimporthandler-extras/src/test-files/dihextras/solr/collection1/conf/dataimport-schema-no-unique-key.xml
index 068322e..84d0dbe 100644
--- a/solr/contrib/dataimporthandler-extras/src/test-files/dihextras/solr/collection1/conf/dataimport-schema-no-unique-key.xml
+++ b/solr/contrib/dataimporthandler-extras/src/test-files/dihextras/solr/collection1/conf/dataimport-schema-no-unique-key.xml
@@ -158,7 +158,7 @@
     </analyzer>
   </fieldType>
 
-  <!-- A text field that uses WordDelimiterFilter to enable splitting and matching of
+  <!-- A text field that uses WordDelimiterGraphFilter to enable splitting and matching of
       words on case-change, alpha numeric boundaries, and non-alphanumeric chars,
       so that a query of "wifi" or "wi fi" could match a document containing "Wi-Fi".
       Synonyms and stopwords are customized by external files, and stemming is enabled.
@@ -172,18 +172,19 @@
       <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
       -->
       <!--<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <!--<filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
       <filter class="solr.PorterStemFilterFactory"/>-->
       <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <!--<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>-->
       <!--<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <!--<filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-schema.xml
----------------------------------------------------------------------
diff --git a/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-schema.xml b/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-schema.xml
index 8d62c84..86bd231 100644
--- a/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-schema.xml
+++ b/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-schema.xml
@@ -10,23 +10,32 @@
   <fieldType name="text" class="solr.TextField" positionIncrementGap="100">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory" />
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
     </analyzer>
   </fieldType>
   <fieldType name="textTight" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+              catenateNumbers="1" catenateAll="0"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory" />
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-solr_id-schema.xml
----------------------------------------------------------------------
diff --git a/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-solr_id-schema.xml b/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-solr_id-schema.xml
index 9a95797..da4bd39 100644
--- a/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-solr_id-schema.xml
+++ b/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-solr_id-schema.xml
@@ -156,7 +156,7 @@
     </analyzer>
   </fieldType>
 
-  <!-- A text field that uses WordDelimiterFilter to enable splitting and matching of
+  <!-- A text field that uses WordDelimiterGraphFilter to enable splitting and matching of
       words on case-change, alpha numeric boundaries, and non-alphanumeric chars,
       so that a query of "wifi" or "wi fi" could match a document containing "Wi-Fi".
       Synonyms and stopwords are customized by external files, and stemming is enabled.
@@ -170,18 +170,19 @@
       <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
       -->
       <!--<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <!--<filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
       <filter class="solr.PorterStemFilterFactory"/>-->
       <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <!--<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>-->
       <!--<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <!--<filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
@@ -194,11 +195,23 @@
   <!-- Less flexible matching, but less false matches.  Probably not ideal for product names,
        but may be good for SKUs.  Can insert dashes in the wrong place and still match. -->
   <fieldType name="textTight" class="solr.TextField" positionIncrementGap="100">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <!--<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>-->
+      <!--<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+              catenateNumbers="1" catenateAll="0"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <!--<filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
+      <filter class="solr.EnglishMinimalStemFilterFactory"/>-->
+      <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <!--<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>-->
       <!--<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <!--<filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/contrib/extraction/src/test-files/extraction/solr/collection1/conf/schema.xml
----------------------------------------------------------------------
diff --git a/solr/contrib/extraction/src/test-files/extraction/solr/collection1/conf/schema.xml b/solr/contrib/extraction/src/test-files/extraction/solr/collection1/conf/schema.xml
index bd9adbe..b743ed8 100644
--- a/solr/contrib/extraction/src/test-files/extraction/solr/collection1/conf/schema.xml
+++ b/solr/contrib/extraction/src/test-files/extraction/solr/collection1/conf/schema.xml
@@ -55,26 +55,41 @@
   <fieldType name="failtype1" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
               catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
   </fieldType>
 
   <!-- Demonstrating ignoreCaseChange -->
   <fieldType name="wdf_nocase" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+              catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
   </fieldType>
 
   <fieldType name="wdf_preserve" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
+              catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -233,15 +248,16 @@
   <fieldType name="subword" class="solr.TextField" multiValued="true" positionIncrementGap="100">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.StopFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.StopFilterFactory"/>
@@ -253,13 +269,14 @@
   <fieldType name="skutype1" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -269,13 +286,14 @@
   <fieldType name="skutype2" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/contrib/ltr/src/test-files/solr/collection1/conf/synonyms.txt
----------------------------------------------------------------------
diff --git a/solr/contrib/ltr/src/test-files/solr/collection1/conf/synonyms.txt b/solr/contrib/ltr/src/test-files/solr/collection1/conf/synonyms.txt
index 0ef0e8d..461ed4d 100644
--- a/solr/contrib/ltr/src/test-files/solr/collection1/conf/synonyms.txt
+++ b/solr/contrib/ltr/src/test-files/solr/collection1/conf/synonyms.txt
@@ -21,7 +21,7 @@ fooaaa,baraaa,bazaaa
 GB,gib,gigabyte,gigabytes
 MB,mib,megabyte,megabytes
 Television, Televisions, TV, TVs
-#notice we use "gib" instead of "GiB" so any WordDelimiterFilter coming
+#notice we use "gib" instead of "GiB" so any WordDelimiterGraphFilter coming
 #after us won't split it into two words.
 
 # Synonym mappings can be used for spelling correction too

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/contrib/uima/src/test-files/uima/solr/collection1/conf/schema.xml
----------------------------------------------------------------------
diff --git a/solr/contrib/uima/src/test-files/uima/solr/collection1/conf/schema.xml b/solr/contrib/uima/src/test-files/uima/solr/collection1/conf/schema.xml
index 5e0273e..89d44e6 100644
--- a/solr/contrib/uima/src/test-files/uima/solr/collection1/conf/schema.xml
+++ b/solr/contrib/uima/src/test-files/uima/solr/collection1/conf/schema.xml
@@ -191,7 +191,7 @@
     </fieldType>
 
     <!--
-      A text field that uses WordDelimiterFilter to enable splitting and
+      A text field that uses WordDelimiterGraphFilter to enable splitting and
       matching of words on case-change, alpha numeric boundaries, and
       non-alphanumeric chars, so that a query of "wifi" or "wi fi" could
       match a document containing "Wi-Fi". Synonyms and stopwords are
@@ -212,11 +212,11 @@
         -->
         <filter class="solr.StopFilterFactory" ignoreCase="true"
           words="stopwords.txt" />
-        <filter class="solr.WordDelimiterFilterFactory"
+        <filter class="solr.WordDelimiterGraphFilterFactory"
           generateWordParts="1" generateNumberParts="1" catenateWords="1"
           catenateNumbers="1" catenateAll="0" splitOnCaseChange="1" />
         <filter class="solr.LowerCaseFilterFactory" />
-        
+        <filter class="solr.FlattenGraphFilterFactory" />
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.MockTokenizerFactory" />
@@ -224,7 +224,7 @@
           ignoreCase="true" expand="true" />
         <filter class="solr.StopFilterFactory" ignoreCase="true"
           words="stopwords.txt" />
-        <filter class="solr.WordDelimiterFilterFactory"
+        <filter class="solr.WordDelimiterGraphFilterFactory"
           generateWordParts="1" generateNumberParts="1" catenateWords="0"
           catenateNumbers="0" catenateAll="0" splitOnCaseChange="1" />
         <filter class="solr.LowerCaseFilterFactory" />
@@ -240,23 +240,36 @@
     -->
     <fieldType name="textTight" class="solr.TextField"
       positionIncrementGap="100">
-      <analyzer>
+      <analyzer type="index">
         <tokenizer class="solr.MockTokenizerFactory" />
         <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt"
           ignoreCase="true" expand="false" />
         <filter class="solr.StopFilterFactory" ignoreCase="true"
           words="stopwords.txt" />
-        <filter class="solr.WordDelimiterFilterFactory"
+        <filter class="solr.WordDelimiterGraphFilterFactory"
           generateWordParts="0" generateNumberParts="0" catenateWords="1"
           catenateNumbers="1" catenateAll="0" />
         <filter class="solr.LowerCaseFilterFactory" />
         
         <!--
           this filter can remove any duplicate tokens that appear at the
-          same position - sometimes possible with WordDelimiterFilter in
+          same position - sometimes possible with WordDelimiterGraphFilter in
           conjuncton with stemming.
         -->
         <filter class="solr.RemoveDuplicatesTokenFilterFactory" />
+        <filter class="solr.FlattenGraphFilterFactory" />
+      </analyzer>
+      <analyzer type="query">
+        <tokenizer class="solr.MockTokenizerFactory" />
+        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt"
+                ignoreCase="true" expand="false" />
+        <filter class="solr.StopFilterFactory" ignoreCase="true"
+                words="stopwords.txt" />
+        <filter class="solr.WordDelimiterGraphFilterFactory"
+                generateWordParts="0" generateNumberParts="0" catenateWords="1"
+                catenateNumbers="1" catenateAll="0" />
+        <filter class="solr.LowerCaseFilterFactory" />
+        <filter class="solr.RemoveDuplicatesTokenFilterFactory" />
       </analyzer>
     </fieldType>
 
@@ -271,10 +284,11 @@
         <tokenizer class="solr.MockTokenizerFactory" />
         <filter class="solr.StopFilterFactory" ignoreCase="true"
           words="stopwords.txt" />
-        <filter class="solr.WordDelimiterFilterFactory"
+        <filter class="solr.WordDelimiterGraphFilterFactory"
           generateWordParts="1" generateNumberParts="1" catenateWords="1"
           catenateNumbers="1" catenateAll="0" splitOnCaseChange="0" />
         <filter class="solr.LowerCaseFilterFactory" />
+        <filter class="solr.FlattenGraphFilterFactory" />
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.MockTokenizerFactory" />
@@ -282,7 +296,7 @@
           ignoreCase="true" expand="true" />
         <filter class="solr.StopFilterFactory" ignoreCase="true"
           words="stopwords.txt" />
-        <filter class="solr.WordDelimiterFilterFactory"
+        <filter class="solr.WordDelimiterGraphFilterFactory"
           generateWordParts="1" generateNumberParts="1" catenateWords="0"
           catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" />
         <filter class="solr.LowerCaseFilterFactory" />
@@ -301,13 +315,14 @@
         <tokenizer class="solr.MockTokenizerFactory" />
         <filter class="solr.StopFilterFactory" ignoreCase="true"
           words="stopwords.txt" />
-        <filter class="solr.WordDelimiterFilterFactory"
+        <filter class="solr.WordDelimiterGraphFilterFactory"
           generateWordParts="1" generateNumberParts="1" catenateWords="1"
           catenateNumbers="1" catenateAll="0" splitOnCaseChange="0" />
         <filter class="solr.LowerCaseFilterFactory" />
         <filter class="solr.ReversedWildcardFilterFactory"
           withOriginal="true" maxPosAsterisk="3" maxPosQuestion="2"
           maxFractionAsterisk="0.33" />
+        <filter class="solr.FlattenGraphFilterFactory"/>
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.MockTokenizerFactory" />
@@ -315,7 +330,7 @@
           ignoreCase="true" expand="true" />
         <filter class="solr.StopFilterFactory" ignoreCase="true"
           words="stopwords.txt" />
-        <filter class="solr.WordDelimiterFilterFactory"
+        <filter class="solr.WordDelimiterGraphFilterFactory"
           generateWordParts="1" generateNumberParts="1" catenateWords="0"
           catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" />
         <filter class="solr.LowerCaseFilterFactory" />

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/contrib/uima/src/test-files/uima/solr/collection1/conf/synonyms.txt
----------------------------------------------------------------------
diff --git a/solr/contrib/uima/src/test-files/uima/solr/collection1/conf/synonyms.txt b/solr/contrib/uima/src/test-files/uima/solr/collection1/conf/synonyms.txt
index b0e31cb..26d237a 100644
--- a/solr/contrib/uima/src/test-files/uima/solr/collection1/conf/synonyms.txt
+++ b/solr/contrib/uima/src/test-files/uima/solr/collection1/conf/synonyms.txt
@@ -23,7 +23,7 @@ fooaaa,baraaa,bazaaa
 GB,gib,gigabyte,gigabytes
 MB,mib,megabyte,megabytes
 Television, Televisions, TV, TVs
-#notice we use "gib" instead of "GiB" so any WordDelimiterFilter coming
+#notice we use "gib" instead of "GiB" so any WordDelimiterGraphFilter coming
 #after us won't split it into two words.
 
 # Synonym mappings can be used for spelling correction too

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/contrib/uima/src/test-files/uima/uima-tokenizers-schema.xml
----------------------------------------------------------------------
diff --git a/solr/contrib/uima/src/test-files/uima/uima-tokenizers-schema.xml b/solr/contrib/uima/src/test-files/uima/uima-tokenizers-schema.xml
index 0007be7..229d69b 100644
--- a/solr/contrib/uima/src/test-files/uima/uima-tokenizers-schema.xml
+++ b/solr/contrib/uima/src/test-files/uima/uima-tokenizers-schema.xml
@@ -191,7 +191,7 @@
   </fieldType>
 
   <!--
-    A text field that uses WordDelimiterFilter to enable splitting and
+    A text field that uses WordDelimiterGraphFilter to enable splitting and
     matching of words on case-change, alpha numeric boundaries, and
     non-alphanumeric chars, so that a query of "wifi" or "wi fi" could
     match a document containing "Wi-Fi". Synonyms and stopwords are
@@ -210,16 +210,18 @@
       <!--
         Case insensitive stop word removal. add
       -->
-      <filter class="solr.WordDelimiterFilterFactory"
+      <filter class="solr.WordDelimiterGraphFilterFactory"
               generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
 
+      <filter class="solr.FlattenGraphFilterFactory"/>
+
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
 
-      <filter class="solr.WordDelimiterFilterFactory"
+      <filter class="solr.WordDelimiterGraphFilterFactory"
               generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
@@ -235,19 +237,29 @@
   -->
   <fieldType name="textTight" class="solr.TextField"
              positionIncrementGap="100">
-    <analyzer>
+    <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory"
+      <filter class="solr.WordDelimiterGraphFilterFactory"
               generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
 
       <!--
         this filter can remove any duplicate tokens that appear at the
-        same position - sometimes possible with WordDelimiterFilter in
+        same position - sometimes possible with WordDelimiterGraphFilter in
         conjuncton with stemming.
       -->
       <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+
+      <filter class="solr.FlattenGraphFilterFactory" />
+    </analyzer>
+    <analyzer type="query">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory"
+              generateWordParts="0" generateNumberParts="0" catenateWords="1"
+              catenateNumbers="1" catenateAll="0"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
     </analyzer>
   </fieldType>
 
@@ -277,14 +289,15 @@
              positionIncrementGap="100">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory"
+      <filter class="solr.WordDelimiterGraphFilterFactory"
               generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory"
+      <filter class="solr.WordDelimiterGraphFilterFactory"
               generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
@@ -301,17 +314,18 @@
              positionIncrementGap="100">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory"
+      <filter class="solr.WordDelimiterGraphFilterFactory"
               generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.ReversedWildcardFilterFactory"
               withOriginal="true" maxPosAsterisk="3" maxPosQuestion="2"
               maxFractionAsterisk="0.33"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory"
+      <filter class="solr.WordDelimiterGraphFilterFactory"
               generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/core/src/test-files/solr/collection1/conf/schema-HighlighterMaxOffsetTest.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-HighlighterMaxOffsetTest.xml b/solr/core/src/test-files/solr/collection1/conf/schema-HighlighterMaxOffsetTest.xml
index 808453c..122d4ce 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema-HighlighterMaxOffsetTest.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-HighlighterMaxOffsetTest.xml
@@ -67,16 +67,17 @@ Test for HighlighterMaxOffsetTest which requires the use of ReversedWildcardFilt
   <fieldType name="text_stx" class="solr.TextField" positionIncrementGap="100">
     <analyzer type="index">
       <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.ReversedWildcardFilterFactory" withOriginal="true"
               maxPosAsterisk="3" maxPosQuestion="2" maxFractionAsterisk="0.33"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.WhitespaceTokenizerFactory"/>
       <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/core/src/test-files/solr/collection1/conf/schema-copyfield-test.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-copyfield-test.xml b/solr/core/src/test-files/solr/collection1/conf/schema-copyfield-test.xml
index 47ddeb2..a9df7f8 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema-copyfield-test.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-copyfield-test.xml
@@ -60,17 +60,25 @@
   <fieldType name="failtype1" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
               catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
   </fieldType>
 
   <!-- Demonstrating ignoreCaseChange -->
   <fieldType name="wdf_nocase" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+              catenateNumbers="0" catenateAll="0" splitOnCaseChange="0"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory" />
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -229,15 +237,16 @@
   <fieldType name="subword" class="solr.TextField" multiValued="true" positionIncrementGap="100">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.StopFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.StopFilterFactory"/>
@@ -249,13 +258,14 @@
   <fieldType name="skutype1" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -265,13 +275,14 @@
   <fieldType name="skutype2" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/core/src/test-files/solr/collection1/conf/schema-docValuesJoin.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-docValuesJoin.xml b/solr/core/src/test-files/solr/collection1/conf/schema-docValuesJoin.xml
index 1d559bb..baed872 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema-docValuesJoin.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-docValuesJoin.xml
@@ -42,11 +42,12 @@
               ignoreCase="true"
               words="stopwords.txt"
       />
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
@@ -55,7 +56,7 @@
               ignoreCase="true"
               words="stopwords.txt"
       />
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/core/src/test-files/solr/collection1/conf/schema-folding.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-folding.xml b/solr/core/src/test-files/solr/collection1/conf/schema-folding.xml
index cc9cae3..2b0bf32 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema-folding.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-folding.xml
@@ -47,7 +47,7 @@
     </analyzer>
     <analyzer type="multiterm">        <!-- Intentionally different to test that these are kept  distinct -->
       <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.ASCIIFoldingFilterFactory"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/core/src/test-files/solr/collection1/conf/schema-hash.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-hash.xml b/solr/core/src/test-files/solr/collection1/conf/schema-hash.xml
index b132473..6a7987c 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema-hash.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-hash.xml
@@ -72,26 +72,41 @@
   <fieldtype name="failtype1" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
               catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
   </fieldtype>
 
   <!-- Demonstrating ignoreCaseChange -->
   <fieldtype name="wdf_nocase" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+              catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
   </fieldtype>
 
   <fieldtype name="wdf_preserve" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
+              catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -191,9 +206,16 @@
     </analyzer>
   </fieldtype>
   <fieldtype name="lowerpunctfilt" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+              catenateNumbers="1" catenateAll="1" splitOnCaseChange="1"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="1" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -265,15 +287,16 @@
   <fieldtype name="subword" class="solr.TextField" multiValued="true" positionIncrementGap="100">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.StopFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.StopFilterFactory"/>
@@ -285,15 +308,16 @@
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1"
               generateNumberParts="0" catenateWords="0" catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.StopFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1"
               generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.StopFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
@@ -304,8 +328,9 @@
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" splitOnNumerics="0" splitOnCaseChange="0" generateWordParts="1"
               generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
@@ -318,13 +343,14 @@
   <fieldtype name="skutype1" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -334,13 +360,14 @@
   <fieldtype name="skutype2" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/core/src/test-files/solr/collection1/conf/schema-psuedo-fields.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-psuedo-fields.xml b/solr/core/src/test-files/solr/collection1/conf/schema-psuedo-fields.xml
index 20f2d2d..8491ce8 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema-psuedo-fields.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-psuedo-fields.xml
@@ -50,11 +50,12 @@
               ignoreCase="true"
               words="stopwords.txt"
       />
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
@@ -63,7 +64,7 @@
               ignoreCase="true"
               words="stopwords.txt"
       />
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/core/src/test-files/solr/collection1/conf/schema-required-fields.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-required-fields.xml b/solr/core/src/test-files/solr/collection1/conf/schema-required-fields.xml
index d3a7818..c92b901 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema-required-fields.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-required-fields.xml
@@ -60,9 +60,10 @@
   <fieldType name="failtype1" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
               catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
   </fieldType>
 
@@ -219,15 +220,16 @@
   <fieldType name="subword" class="solr.TextField" multiValued="true" positionIncrementGap="100">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.StopFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.StopFilterFactory"/>
@@ -239,13 +241,14 @@
   <fieldType name="skutype1" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -255,13 +258,14 @@
   <fieldType name="skutype2" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/b46b8278/solr/core/src/test-files/solr/collection1/conf/schema-rest.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-rest.xml b/solr/core/src/test-files/solr/collection1/conf/schema-rest.xml
index 8bd603b..7d9bf02 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema-rest.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-rest.xml
@@ -45,26 +45,41 @@
   <fieldType name="failtype1" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
               catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
   </fieldType>
 
   <!-- Demonstrating ignoreCaseChange -->
   <fieldType name="wdf_nocase" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+              catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
   </fieldType>
 
   <fieldType name="wdf_preserve" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
+              catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -90,11 +105,12 @@
               ignoreCase="true"
               words="stopwords.txt"
       />
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
@@ -103,7 +119,7 @@
               ignoreCase="true"
               words="stopwords.txt"
       />
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
@@ -153,11 +169,12 @@
               ignoreCase="true"
               words="stopwords.txt"
       />
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
@@ -166,7 +183,7 @@
               ignoreCase="true"
               words="stopwords.txt"
       />
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
@@ -300,15 +317,16 @@
   <fieldType name="subword" class="solr.TextField" multiValued="true" positionIncrementGap="100">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.StopFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.StopFilterFactory"/>
@@ -320,16 +338,17 @@
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" protected="protwords.txt" splitOnNumerics="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" protected="protwords.txt" splitOnNumerics="0"
               splitOnCaseChange="0" generateWordParts="1" generateNumberParts="0" catenateWords="0" catenateNumbers="0"
               catenateAll="0"/>
       <filter class="solr.StopFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" protected="protwords.txt" splitOnNumerics="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" protected="protwords.txt" splitOnNumerics="0"
               splitOnCaseChange="0" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1"
               catenateAll="0"/>
       <filter class="solr.StopFilterFactory"/>
@@ -341,9 +360,10 @@
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.LowerCaseFilterFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" protected="protwords.txt" splitOnNumerics="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" protected="protwords.txt" splitOnNumerics="0"
               splitOnCaseChange="0" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0"
               catenateAll="0"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
@@ -356,13 +376,14 @@
   <fieldType name="skutype1" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -372,13 +393,14 @@
   <fieldType name="skutype2" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -401,16 +423,17 @@
       <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
       -->
       <!--<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="0" splitOnCaseChange="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
-      <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+      <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.PorterStemFilterFactory"/>


[12/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-10341: SQL AVG function mis-interprets field type

Posted by ab...@apache.org.
SOLR-10341: SQL AVG function mis-interprets field type


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/aa2b46a6
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/aa2b46a6
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/aa2b46a6

Branch: refs/heads/jira/solr-9959
Commit: aa2b46a62a52c0d0117312add2a667bf6b14a709
Parents: 1a80e4d
Author: Joel Bernstein <jb...@apache.org>
Authored: Tue Mar 28 09:25:25 2017 +0100
Committer: Joel Bernstein <jb...@apache.org>
Committed: Tue Mar 28 18:42:18 2017 +0100

----------------------------------------------------------------------
 .../apache/solr/handler/sql/SolrAggregate.java  |   6 +-
 .../apache/solr/handler/sql/SolrEnumerator.java |   1 +
 .../apache/solr/handler/sql/SolrProject.java    |   2 +-
 .../org/apache/solr/handler/sql/SolrRel.java    |  10 +-
 .../org/apache/solr/handler/sql/SolrSchema.java |   4 +
 .../org/apache/solr/handler/sql/SolrTable.java  |  43 +++-
 .../handler/sql/SolrToEnumerableConverter.java  |   1 +
 .../org/apache/solr/handler/TestSQLHandler.java | 250 ++++++++++++++++---
 .../client/solrj/io/stream/FacetStream.java     |   8 +-
 .../client/solrj/io/stream/StatsStream.java     |  14 +-
 .../solrj/io/stream/metrics/MeanMetric.java     |  35 ++-
 .../client/solrj/io/stream/metrics/Metric.java  |   1 +
 .../stream/StreamExpressionToExpessionTest.java |   8 +-
 13 files changed, 320 insertions(+), 63 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/aa2b46a6/solr/core/src/java/org/apache/solr/handler/sql/SolrAggregate.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/sql/SolrAggregate.java b/solr/core/src/java/org/apache/solr/handler/sql/SolrAggregate.java
index 8c4d46d..f207eeb 100644
--- a/solr/core/src/java/org/apache/solr/handler/sql/SolrAggregate.java
+++ b/solr/core/src/java/org/apache/solr/handler/sql/SolrAggregate.java
@@ -69,16 +69,18 @@ class SolrAggregate extends Aggregate implements SolrRel {
 
     for(Pair<AggregateCall, String> namedAggCall : getNamedAggCalls()) {
 
-
       AggregateCall aggCall = namedAggCall.getKey();
 
       Pair<String, String> metric = toSolrMetric(implementor, aggCall, inNames);
       implementor.addReverseAggMapping(namedAggCall.getValue(), metric.getKey().toLowerCase(Locale.ROOT)+"("+metric.getValue()+")");
       implementor.addMetricPair(namedAggCall.getValue(), metric.getKey(), metric.getValue());
+      /*
       if(aggCall.getName() == null) {
+        System.out.println("AGG:"+namedAggCall.getValue()+":"+ aggCall.getAggregation().getName() + "(" + inNames.get(aggCall.getArgList().get(0)) + ")");
         implementor.addFieldMapping(namedAggCall.getValue(),
-            aggCall.getAggregation().getName() + "(" + inNames.get(aggCall.getArgList().get(0)) + ")");
+          aggCall.getAggregation().getName() + "(" + inNames.get(aggCall.getArgList().get(0)) + ")");
       }
+      */
     }
 
     for(int group : getGroupSet()) {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/aa2b46a6/solr/core/src/java/org/apache/solr/handler/sql/SolrEnumerator.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/sql/SolrEnumerator.java b/solr/core/src/java/org/apache/solr/handler/sql/SolrEnumerator.java
index be6046c..7ba3838 100644
--- a/solr/core/src/java/org/apache/solr/handler/sql/SolrEnumerator.java
+++ b/solr/core/src/java/org/apache/solr/handler/sql/SolrEnumerator.java
@@ -43,6 +43,7 @@ class SolrEnumerator implements Enumerator<Object> {
    * @param fields Fields to get from each Tuple
    */
   SolrEnumerator(TupleStream tupleStream, List<Map.Entry<String, Class>> fields) {
+
     this.tupleStream = tupleStream;
     try {
       this.tupleStream.open();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/aa2b46a6/solr/core/src/java/org/apache/solr/handler/sql/SolrProject.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/sql/SolrProject.java b/solr/core/src/java/org/apache/solr/handler/sql/SolrProject.java
index c4217f2..bd36ba8 100644
--- a/solr/core/src/java/org/apache/solr/handler/sql/SolrProject.java
+++ b/solr/core/src/java/org/apache/solr/handler/sql/SolrProject.java
@@ -58,7 +58,7 @@ class SolrProject extends Project implements SolrRel {
     for (Pair<RexNode, String> pair : getNamedProjects()) {
       final String name = pair.right;
       final String expr = pair.left.accept(translator);
-      implementor.addFieldMapping(name, expr);
+      implementor.addFieldMapping(name, expr, false);
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/aa2b46a6/solr/core/src/java/org/apache/solr/handler/sql/SolrRel.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/sql/SolrRel.java b/solr/core/src/java/org/apache/solr/handler/sql/SolrRel.java
index d4de2c6..370de16 100644
--- a/solr/core/src/java/org/apache/solr/handler/sql/SolrRel.java
+++ b/solr/core/src/java/org/apache/solr/handler/sql/SolrRel.java
@@ -47,9 +47,11 @@ interface SolrRel extends RelNode {
     RelOptTable table;
     SolrTable solrTable;
 
-    void addFieldMapping(String key, String val) {
-      if(key != null && !fieldMappings.containsKey(key)) {
-        this.fieldMappings.put(key, val);
+    void addFieldMapping(String key, String val, boolean overwrite) {
+      if(key != null) {
+        if(overwrite || !fieldMappings.containsKey(key)) {
+          this.fieldMappings.put(key, val);
+        }
       }
     }
 
@@ -83,7 +85,7 @@ interface SolrRel extends RelNode {
 
       String metricIdentifier = metric.toLowerCase(Locale.ROOT) + "(" + column + ")";
       if(outName != null) {
-        this.addFieldMapping(outName, metricIdentifier);
+        this.addFieldMapping(outName, metricIdentifier, true);
       }
     }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/aa2b46a6/solr/core/src/java/org/apache/solr/handler/sql/SolrSchema.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/sql/SolrSchema.java b/solr/core/src/java/org/apache/solr/handler/sql/SolrSchema.java
index 83fa537..20d01f3 100644
--- a/solr/core/src/java/org/apache/solr/handler/sql/SolrSchema.java
+++ b/solr/core/src/java/org/apache/solr/handler/sql/SolrSchema.java
@@ -99,10 +99,14 @@ class SolrSchema extends AbstractSchema {
         case "string":
           type = typeFactory.createJavaType(String.class);
           break;
+        case "tint":
+        case "tlong":
         case "int":
         case "long":
           type = typeFactory.createJavaType(Long.class);
           break;
+        case "tfloat":
+        case "tdouble":
         case "float":
         case "double":
           type = typeFactory.createJavaType(Double.class);

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/aa2b46a6/solr/core/src/java/org/apache/solr/handler/sql/SolrTable.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/sql/SolrTable.java b/solr/core/src/java/org/apache/solr/handler/sql/SolrTable.java
index e313b44..b7f552b 100644
--- a/solr/core/src/java/org/apache/solr/handler/sql/SolrTable.java
+++ b/solr/core/src/java/org/apache/solr/handler/sql/SolrTable.java
@@ -128,7 +128,7 @@ class SolrTable extends AbstractQueryableTable implements TranslatableTable {
         tupleStream = handleSelect(zk, collection, q, fields, orders, limit);
       } else {
         if(buckets.isEmpty()) {
-          tupleStream = handleStats(zk, collection, q, metricPairs);
+          tupleStream = handleStats(zk, collection, q, metricPairs, fields);
         } else {
           if(mapReduce) {
             tupleStream = handleGroupByMapReduce(zk,
@@ -430,6 +430,11 @@ class SolrTable extends AbstractQueryableTable implements TranslatableTable {
                                              final String limit,
                                              final String havingPredicate) throws IOException {
 
+    Map<String, Class> fmap = new HashMap();
+    for(Map.Entry<String, Class> entry : fields) {
+      fmap.put(entry.getKey(), entry.getValue());
+    }
+
     int numWorkers = Integer.parseInt(properties.getProperty("numWorkers", "1"));
 
     Bucket[] buckets = buildBuckets(_buckets, fields);
@@ -437,6 +442,13 @@ class SolrTable extends AbstractQueryableTable implements TranslatableTable {
 
     if(metrics.length == 0) {
       return handleSelectDistinctMapReduce(zk, collection, properties, fields, query, orders, buckets, limit);
+    } else {
+      for(Metric metric : metrics) {
+        Class c = fmap.get(metric.getIdentifier());
+        if(Long.class.equals(c)) {
+          metric.outputLong = true;
+        }
+      }
     }
 
     Set<String> fieldSet = getFieldSet(metrics, fields);
@@ -556,6 +568,12 @@ class SolrTable extends AbstractQueryableTable implements TranslatableTable {
                                          final String lim,
                                          final String havingPredicate) throws IOException {
 
+
+    Map<String, Class> fmap = new HashMap();
+    for(Map.Entry<String, Class> f : fields) {
+      fmap.put(f.getKey(), f.getValue());
+    }
+
     ModifiableSolrParams solrParams = new ModifiableSolrParams();
     solrParams.add(CommonParams.Q, query);
 
@@ -564,6 +582,13 @@ class SolrTable extends AbstractQueryableTable implements TranslatableTable {
     if(metrics.length == 0) {
       metrics = new Metric[1];
       metrics[0] = new CountMetric();
+    } else {
+      for(Metric metric : metrics) {
+        Class c = fmap.get(metric.getIdentifier());
+        if(Long.class.equals(c)) {
+          metric.outputLong = true;
+        }
+      }
     }
 
     int limit = lim != null ? Integer.parseInt(lim) : 1000;
@@ -767,12 +792,26 @@ class SolrTable extends AbstractQueryableTable implements TranslatableTable {
   private TupleStream handleStats(String zk,
                                   String collection,
                                   String query,
-                                  List<Pair<String, String>> metricPairs) {
+                                  List<Pair<String, String>> metricPairs,
+                                  List<Map.Entry<String, Class>> fields) {
+
 
+    Map<String, Class> fmap = new HashMap();
+    for(Map.Entry<String, Class> entry : fields) {
+      fmap.put(entry.getKey(), entry.getValue());
+    }
 
     ModifiableSolrParams solrParams = new ModifiableSolrParams();
     solrParams.add(CommonParams.Q, query);
     Metric[] metrics = buildMetrics(metricPairs, false).toArray(new Metric[0]);
+
+    for(Metric metric : metrics) {
+      Class c = fmap.get(metric.getIdentifier());
+      if(Long.class.equals(c)) {
+        metric.outputLong = true;
+      }
+    }
+
     return new StatsStream(zk, collection, solrParams, metrics);
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/aa2b46a6/solr/core/src/java/org/apache/solr/handler/sql/SolrToEnumerableConverter.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/sql/SolrToEnumerableConverter.java b/solr/core/src/java/org/apache/solr/handler/sql/SolrToEnumerableConverter.java
index 10d4d4c..c97303b 100644
--- a/solr/core/src/java/org/apache/solr/handler/sql/SolrToEnumerableConverter.java
+++ b/solr/core/src/java/org/apache/solr/handler/sql/SolrToEnumerableConverter.java
@@ -93,6 +93,7 @@ class SolrToEnumerableConverter extends ConverterImpl implements EnumerableRel {
   }
 
   private List<String> generateFields(List<String> queryFields, Map<String, String> fieldMappings) {
+
     if(fieldMappings.isEmpty()) {
       return queryFields;
     } else {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/aa2b46a6/solr/core/src/test/org/apache/solr/handler/TestSQLHandler.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/handler/TestSQLHandler.java b/solr/core/src/test/org/apache/solr/handler/TestSQLHandler.java
index cb16f03..4889c90 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestSQLHandler.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestSQLHandler.java
@@ -88,6 +88,7 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase {
     testWhere();
     testMixedCaseFields();
     testBasicGrouping();
+    testBasicGroupingTint();
     testBasicGroupingFacets();
     testSelectDistinct();
     testSelectDistinctFacets();
@@ -669,7 +670,7 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase {
       commit();
 
       SolrParams sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce",
-        "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), cast(avg(1.0 * field_i) as float) from collection1 where text='XXXX' group by str_s order by sum(field_i) asc limit 2");
+        "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), avg(field_i) from collection1 where text='XXXX' group by str_s order by sum(field_i) asc limit 2");
 
       SolrStream solrStream = new SolrStream(jetty.url, sParams);
       List<Tuple> tuples = getTuples(solrStream);
@@ -684,7 +685,7 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase {
       assert(tuple.getDouble("EXPR$2") == 19); //sum(field_i)
       assert(tuple.getDouble("EXPR$3") == 8); //min(field_i)
       assert(tuple.getDouble("EXPR$4") == 11); //max(field_i)
-      assert(tuple.getDouble("EXPR$5") == 9.5D); //avg(field_i)
+      assert(tuple.getDouble("EXPR$5") == 10); //avg(field_i)
 
       tuple = tuples.get(1);
       assert(tuple.get("str_s").equals("a"));
@@ -692,10 +693,36 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase {
       assert(tuple.getDouble("EXPR$2") == 27); //sum(field_i)
       assert(tuple.getDouble("EXPR$3") == 7); //min(field_i)
       assert(tuple.getDouble("EXPR$4") == 20); //max(field_i)
-      assert(tuple.getDouble("EXPR$5") == 13.5D); //avg(field_i)
+      assert(tuple.getDouble("EXPR$5") == 14); //avg(field_i)
+
+
+      sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce",
+          "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), cast(avg(1.0 * field_i) as float) as blah from collection1 where text='XXXX' group by str_s order by sum(field_i) asc limit 2");
+
+      solrStream = new SolrStream(jetty.url, sParams);
+      tuples = getTuples(solrStream);
+
+      //Only two results because of the limit.
+      assert(tuples.size() == 2);
+
+      tuple = tuples.get(0);
+      assert(tuple.get("str_s").equals("b"));
+      assert(tuple.getDouble("EXPR$1") == 2); //count(*)
+      assert(tuple.getDouble("EXPR$2") == 19); //sum(field_i)
+      assert(tuple.getDouble("EXPR$3") == 8); //min(field_i)
+      assert(tuple.getDouble("EXPR$4") == 11); //max(field_i)
+      assert(tuple.getDouble("blah") == 9.5); //avg(field_i)
+
+      tuple = tuples.get(1);
+      assert(tuple.get("str_s").equals("a"));
+      assert(tuple.getDouble("EXPR$1") == 2); //count(*)
+      assert(tuple.getDouble("EXPR$2") == 27); //sum(field_i)
+      assert(tuple.getDouble("EXPR$3") == 7); //min(field_i)
+      assert(tuple.getDouble("EXPR$4") == 20); //max(field_i)
+      assert(tuple.getDouble("blah") == 13.5); //avg(field_i)
 
       sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce",
-          "stmt", "select str_s as myString, count(*), sum(field_i) as mySum, min(field_i), max(field_i), cast(avg(1.0 * field_i) as float) from collection1 where text='XXXX' group by str_s order by mySum asc limit 2");
+          "stmt", "select str_s as myString, count(*), sum(field_i) as mySum, min(field_i), max(field_i), avg(field_i)  from collection1 where text='XXXX' group by str_s order by mySum asc limit 2");
 
       solrStream = new SolrStream(jetty.url, sParams);
       tuples = getTuples(solrStream);
@@ -709,7 +736,7 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase {
       assert(tuple.getDouble("mySum") == 19);
       assert(tuple.getDouble("EXPR$3") == 8); //min(field_i)
       assert(tuple.getDouble("EXPR$4") == 11); //max(field_i)
-      assert(tuple.getDouble("EXPR$5") == 9.5D); //avg(field_i)
+      assert(tuple.getDouble("EXPR$5") == 10); //avg(field_i)
 
       tuple = tuples.get(1);
       assert(tuple.get("myString").equals("a"));
@@ -717,11 +744,11 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase {
       assert(tuple.getDouble("mySum") == 27);
       assert(tuple.getDouble("EXPR$3") == 7); //min(field_i)
       assert(tuple.getDouble("EXPR$4") == 20); //max(field_i)
-      assert(tuple.getDouble("EXPR$5") == 13.5D); //avg(field_i)
+      assert(tuple.getDouble("EXPR$5") == 14); //avg(field_i)
 
       sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce",
         "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), "
-          + "cast(avg(1.0 * field_i) as float) from collection1 where (text='XXXX' AND NOT ((text='XXXY') AND (text='XXXY' OR text='XXXY'))) "
+          + "avg(field_i) from collection1 where (text='XXXX' AND NOT ((text='XXXY') AND (text='XXXY' OR text='XXXY'))) "
           + "group by str_s order by str_s desc");
 
       solrStream = new SolrStream(jetty.url, sParams);
@@ -746,7 +773,7 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase {
       assert(tuple.getDouble("EXPR$2") == 19); //sum(field_i)
       assert(tuple.getDouble("EXPR$3") == 8); //min(field_i)
       assert(tuple.getDouble("EXPR$4") == 11); //max(field_i)
-      assert(tuple.getDouble("EXPR$5") == 9.5D); //avg(field_i)
+      assert(tuple.getDouble("EXPR$5") == 10D); //avg(field_i)
 
       tuple = tuples.get(2);
       assert(tuple.get("str_s").equals("a"));
@@ -755,11 +782,11 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase {
       assert(tuple.getDouble("EXPR$3") == 7); //min(field_i)
       assert(tuple.getDouble("EXPR$4") == 20); //max(field_i)
 
-      assert(tuple.getDouble("EXPR$5") == 13.5D); //avg(field_i)
+      assert(tuple.getDouble("EXPR$5") == 14); //avg(field_i)
 
       sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce",
           "stmt", "select str_s as myString, count(*) as myCount, sum(field_i) as mySum, min(field_i) as myMin, "
-          + "max(field_i) as myMax, cast(avg(1.0 * field_i) as float) as myAvg from collection1 "
+          + "max(field_i) as myMax, avg(field_i) as myAvg from collection1 "
           + "where (text='XXXX' AND NOT (text='XXXY')) group by str_s order by str_s desc");
 
       solrStream = new SolrStream(jetty.url, sParams);
@@ -784,7 +811,7 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase {
       assert(tuple.getDouble("mySum") == 19);
       assert(tuple.getDouble("myMin") == 8);
       assert(tuple.getDouble("myMax") == 11);
-      assert(tuple.getDouble("myAvg") == 9.5D);
+      assert(tuple.getDouble("myAvg") == 10);
 
       tuple = tuples.get(2);
       assert(tuple.get("myString").equals("a"));
@@ -792,10 +819,10 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase {
       assert(tuple.getDouble("mySum") == 27);
       assert(tuple.getDouble("myMin") == 7);
       assert(tuple.getDouble("myMax") == 20);
-      assert(tuple.getDouble("myAvg") == 13.5D);
+      assert(tuple.getDouble("myAvg") == 14);
 
       sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce",
-          "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), cast(avg(1.0 * field_i) as float) " +
+          "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), avg(field_i) " +
           "from collection1 where text='XXXX' group by str_s having sum(field_i) = 19");
 
       solrStream = new SolrStream(jetty.url, sParams);
@@ -809,10 +836,10 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase {
       assert(tuple.getDouble("EXPR$2") == 19); //sum(field_i)
       assert(tuple.getDouble("EXPR$3") == 8); //min(field_i)
       assert(tuple.getDouble("EXPR$4") == 11); //max(field_i)
-      assert(tuple.getDouble("EXPR$5") == 9.5D); //avg(field_i)
+      assert(tuple.getDouble("EXPR$5") == 10); //avg(field_i)
 
       sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce",
-          "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), cast(avg(1.0 * field_i) as float) " +
+          "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), avg(field_i) " +
           "from collection1 where text='XXXX' group by str_s having ((sum(field_i) = 19) AND (min(field_i) = 8))");
 
       solrStream = new SolrStream(jetty.url, sParams);
@@ -826,11 +853,11 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase {
       assert(tuple.getDouble("EXPR$2") == 19); //sum(field_i)
       assert(tuple.getDouble("EXPR$3") == 8); //min(field_i)
       assert(tuple.getDouble("EXPR$4") == 11); //max(field_i)
-      assert(tuple.getDouble("EXPR$5") == 9.5D); //avg(field_i)
+      assert(tuple.getDouble("EXPR$5") == 10); //avg(field_i)
 
       sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce",
           "stmt", "select str_s, count(*), sum(field_i) as mySum, min(field_i), max(field_i), " +
-          "cast(avg(1.0 * field_i) as float) from collection1 where text='XXXX' group by str_s " +
+          "avg(field_i) from collection1 where text='XXXX' group by str_s " +
           "having ((sum(field_i) = 19) AND (min(field_i) = 8))");
 
       solrStream = new SolrStream(jetty.url, sParams);
@@ -844,11 +871,11 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase {
       assert(tuple.getDouble("mySum") == 19);
       assert(tuple.getDouble("EXPR$3") == 8); //min(field_i)
       assert(tuple.getDouble("EXPR$4") == 11); //max(field_i)
-      assert(tuple.getDouble("EXPR$5") == 9.5D); //avg(field_i)
+      assert(tuple.getDouble("EXPR$5") == 10); //avg(field_i)
 
       sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "map_reduce",
           "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), " +
-          "cast(avg(1.0 * field_i) as float) from collection1 where text='XXXX' group by str_s " +
+          "avg(field_i) from collection1 where text='XXXX' group by str_s " +
           "having ((sum(field_i) = 19) AND (min(field_i) = 100))");
 
       solrStream = new SolrStream(jetty.url, sParams);
@@ -860,6 +887,60 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase {
     }
   }
 
+
+  private void testBasicGroupingTint() throws Exception {
+    try {
+
+      CloudJettyRunner jetty = this.cloudJettys.get(0);
+
+      del("*:*");
+
+      commit();
+
+      indexr("id", "1", "text", "XXXX XXXX", "str_s", "a", "field_ti", "7");
+      indexr("id", "2", "text", "XXXX XXXX", "str_s", "b", "field_ti", "8");
+      indexr("id", "3", "text", "XXXX XXXX", "str_s", "a", "field_ti", "20");
+      indexr("id", "4", "text", "XXXX XXXX", "str_s", "b", "field_ti", "11");
+      indexr("id", "5", "text", "XXXX XXXX", "str_s", "c", "field_ti", "30");
+      indexr("id", "6", "text", "XXXX XXXX", "str_s", "c", "field_ti", "40");
+      indexr("id", "7", "text", "XXXX XXXX", "str_s", "c", "field_ti", "50");
+      indexr("id", "8", "text", "XXXX XXXX", "str_s", "c", "field_ti", "60");
+      indexr("id", "9", "text", "XXXX XXXY", "str_s", "d", "field_ti", "70");
+      commit();
+
+      SolrParams sParams = mapParams(CommonParams.QT, "/sql",
+          "stmt", "select str_s, count(*), sum(field_ti), min(field_ti), max(field_ti), avg(field_ti) from collection1 where text='XXXX' group by str_s order by sum(field_ti) asc limit 2");
+
+      SolrStream solrStream = new SolrStream(jetty.url, sParams);
+      List<Tuple> tuples = getTuples(solrStream);
+
+      //Only two results because of the limit.
+      assert(tuples.size() == 2);
+      Tuple tuple;
+
+      tuple = tuples.get(0);
+      assert(tuple.get("str_s").equals("b"));
+      assert(tuple.getDouble("EXPR$1") == 2); //count(*)
+      assert(tuple.getDouble("EXPR$2") == 19); //sum(field_ti)
+      assert(tuple.getDouble("EXPR$3") == 8); //min(field_ti)
+      assert(tuple.getDouble("EXPR$4") == 11); //max(field_ti)
+      assert(tuple.getDouble("EXPR$5") == 10); //avg(field_ti)
+
+      tuple = tuples.get(1);
+      assert(tuple.get("str_s").equals("a"));
+      assert(tuple.getDouble("EXPR$1") == 2); //count(*)
+      assert(tuple.getDouble("EXPR$2") == 27); //sum(field_ti)
+      assert(tuple.getDouble("EXPR$3") == 7); //min(field_ti)
+      assert(tuple.getDouble("EXPR$4") == 20); //max(field_ti)
+      assert(tuple.getDouble("EXPR$5") == 14); //avg(field_ti)
+
+
+
+    } finally {
+      delete();
+    }
+  }
+
   private void testSelectDistinctFacets() throws Exception {
     try {
       CloudJettyRunner jetty = this.cloudJettys.get(0);
@@ -1506,6 +1587,35 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase {
       assert(tuple.getDouble("EXPR$4") == 20); //max(field_i)
       assert(tuple.getDouble("EXPR$5") == 13.5D); //avg(field_i)
 
+
+      sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "facet",
+          "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), " +
+              "avg(field_i) from collection1 where text='XXXX' group by str_s " +
+              "order by sum(field_i) asc limit 2");
+
+      solrStream = new SolrStream(jetty.url, sParams);
+      tuples = getTuples(solrStream);
+
+      //Only two results because of the limit.
+      assert(tuples.size() == 2);
+
+      tuple = tuples.get(0);
+      assert(tuple.get("str_s").equals("b"));
+      assert(tuple.getDouble("EXPR$1") == 2); //count(*)
+      assert(tuple.getDouble("EXPR$2") == 19); //sum(field_i)
+      assert(tuple.getDouble("EXPR$3") == 8); //min(field_i)
+      assert(tuple.getDouble("EXPR$4") == 11); //max(field_i)
+      assert(tuple.getDouble("EXPR$5") == 10); //avg(field_i)
+
+      tuple = tuples.get(1);
+      assert(tuple.get("str_s").equals("a"));
+      assert(tuple.getDouble("EXPR$1") == 2); //count(*)
+      assert(tuple.getDouble("EXPR$2") == 27); //sum(field_i)
+      assert(tuple.getDouble("EXPR$3") == 7); //min(field_i)
+      assert(tuple.getDouble("EXPR$4") == 20); //max(field_i)
+      assert(tuple.getDouble("EXPR$5") == 14); //avg(field_i)
+
+
       sParams = mapParams(CommonParams.QT, "/sql", "aggregationMode", "facet",
         "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), "
           + "cast(avg(1.0 * field_i) as float) from collection1 where (text='XXXX' AND NOT (text='XXXY')) "
@@ -1667,7 +1777,7 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase {
 
       SolrParams sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce",
           "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), " +
-          "cast(avg(1.0 * field_i) as float) from collection1 where text='XXXX' group by str_s " +
+          "avg(field_i) from collection1 where text='XXXX' group by str_s " +
           "order by sum(field_i) asc limit 2");
 
       SolrStream solrStream = new SolrStream(jetty.url, sParams);
@@ -1684,7 +1794,7 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase {
       assert(tuple.getDouble("EXPR$2") == 19); //sum(field_i)
       assert(tuple.getDouble("EXPR$3") == 8); //min(field_i)
       assert(tuple.getDouble("EXPR$4") == 11); //max(field_i)
-      assert(tuple.getDouble("EXPR$5") == 9.5D); //avg(field_i)
+      assert(tuple.getDouble("EXPR$5") == 10); //avg(field_i)
 
       tuple = tuples.get(1);
       assert(tuple.get("str_s").equals("a"));
@@ -1692,12 +1802,41 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase {
       assert(tuple.getDouble("EXPR$2") == 27); //sum(field_i)
       assert(tuple.getDouble("EXPR$3") == 7);  //min(field_i)
       assert(tuple.getDouble("EXPR$4") == 20); //max(field_i)
-      assert(tuple.getDouble("EXPR$5") == 13.5D); //avg(field_i)
+      assert(tuple.getDouble("EXPR$5") == 14); //avg(field_i)
+
+
+      sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce",
+          "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), " +
+              "cast(avg(1.0 * field_i) as float) from collection1 where text='XXXX' group by str_s " +
+              "order by sum(field_i) asc limit 2");
+
+      solrStream = new SolrStream(jetty.url, sParams);
+      tuples = getTuples(solrStream);
+
+      //Only two results because of the limit.
+      assert(tuples.size() == 2);
+
+      tuple = tuples.get(0);
+      assert(tuple.get("str_s").equals("b"));
+      assert(tuple.getDouble("EXPR$1") == 2); //count(*)
+      assert(tuple.getDouble("EXPR$2") == 19); //sum(field_i)
+      assert(tuple.getDouble("EXPR$3") == 8); //min(field_i)
+      assert(tuple.getDouble("EXPR$4") == 11); //max(field_i)
+      assert(tuple.getDouble("EXPR$5") == 9.5); //avg(field_i)
+
+      tuple = tuples.get(1);
+      assert(tuple.get("str_s").equals("a"));
+      assert(tuple.getDouble("EXPR$1") == 2); //count(*)
+      assert(tuple.getDouble("EXPR$2") == 27); //sum(field_i)
+      assert(tuple.getDouble("EXPR$3") == 7);  //min(field_i)
+      assert(tuple.getDouble("EXPR$4") == 20); //max(field_i)
+      assert(tuple.getDouble("EXPR$5") == 13.5); //avg(field_i)
+
 
 
       sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce",
           "stmt", "select str_s, count(*), sum(field_i) as mySum, min(field_i), max(field_i), " +
-          "cast(avg(1.0 * field_i) as float) from collection1 where text='XXXX' group by str_s order by mySum asc limit 2");
+          "avg(field_i) from collection1 where text='XXXX' group by str_s order by mySum asc limit 2");
 
       solrStream = new SolrStream(jetty.url, sParams);
       tuples = getTuples(solrStream);
@@ -1711,7 +1850,7 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase {
       assert(tuple.getDouble("mySum") == 19);
       assert(tuple.getDouble("EXPR$3") == 8);  //min(field_i)
       assert(tuple.getDouble("EXPR$4") == 11); //max(field_i)
-      assert(tuple.getDouble("EXPR$5") == 9.5D); //avg(field_i)
+      assert(tuple.getDouble("EXPR$5") == 10); //avg(field_i)
 
       tuple = tuples.get(1);
       assert(tuple.get("str_s").equals("a"));
@@ -1719,12 +1858,12 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase {
       assert(tuple.getDouble("mySum") == 27);
       assert(tuple.getDouble("EXPR$3") == 7); //min(field_i)
       assert(tuple.getDouble("EXPR$4") == 20); //max(field_i)
-      assert(tuple.getDouble("EXPR$5") == 13.5D); //avg(field_i)
+      assert(tuple.getDouble("EXPR$5") == 14); //avg(field_i)
 
 
       sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce",
           "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), " +
-          "cast(avg(1.0 * field_i) as float) from collection1 where text='XXXX' group by str_s order by str_s desc");
+          "avg(field_i) from collection1 where text='XXXX' group by str_s order by str_s desc");
 
       solrStream = new SolrStream(jetty.url, sParams);
       tuples = getTuples(solrStream);
@@ -1748,7 +1887,7 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase {
       assert(tuple.getDouble("EXPR$2") == 19); //sum(field_i)
       assert(tuple.getDouble("EXPR$3") == 8); //min(field_i)
       assert(tuple.getDouble("EXPR$4") == 11); //max(field_i)
-      assert(tuple.getDouble("EXPR$5") == 9.5D); //avg(field_i)
+      assert(tuple.getDouble("EXPR$5") == 10); //avg(field_i)
 
       tuple = tuples.get(2);
       assert(tuple.get("str_s").equals("a"));
@@ -1756,12 +1895,12 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase {
       assert(tuple.getDouble("EXPR$2") == 27); //sum(field_i)
       assert(tuple.getDouble("EXPR$3") == 7); //min(field_i)
       assert(tuple.getDouble("EXPR$4") == 20); //max(field_i)
-      assert(tuple.getDouble("EXPR$5") == 13.5D); //avg(field_i)
+      assert(tuple.getDouble("EXPR$5") == 14); //avg(field_i)
 
 
       sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce",
           "stmt", "select str_s as myString, count(*), sum(field_i), min(field_i), max(field_i), " +
-          "cast(avg(1.0 * field_i) as float) from collection1 where text='XXXX' group by str_s order by myString desc");
+          "avg(field_i) from collection1 where text='XXXX' group by str_s order by myString desc");
 
       solrStream = new SolrStream(jetty.url, sParams);
       tuples = getTuples(solrStream);
@@ -1785,7 +1924,7 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase {
       assert(tuple.getDouble("EXPR$2") == 19); //sum(field_i)
       assert(tuple.getDouble("EXPR$3") == 8); //min(field_i)
       assert(tuple.getDouble("EXPR$4") == 11); //max(field_i)
-      assert(tuple.getDouble("EXPR$5") == 9.5D); //avg(field_i)
+      assert(tuple.getDouble("EXPR$5") == 10); //avg(field_i)
 
       tuple = tuples.get(2);
       assert(tuple.get("myString").equals("a"));
@@ -1793,12 +1932,12 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase {
       assert(tuple.getDouble("EXPR$2") == 27); //sum(field_i)
       assert(tuple.getDouble("EXPR$3") == 7); //min(field_i)
       assert(tuple.getDouble("EXPR$4") == 20); //max(field_i)
-      assert(tuple.getDouble("EXPR$5") == 13.5D); //avg(field_i)
+      assert(tuple.getDouble("EXPR$5") == 14); //avg(field_i)
 
 
       sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce",
           "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), " +
-          "cast(avg(1.0 * field_i) as float) from collection1 where text='XXXX' group by str_s having sum(field_i) = 19");
+          "avg(field_i) from collection1 where text='XXXX' group by str_s having sum(field_i) = 19");
 
       solrStream = new SolrStream(jetty.url, sParams);
       tuples = getTuples(solrStream);
@@ -1811,11 +1950,11 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase {
       assert(tuple.getDouble("EXPR$2") == 19); //sum(field_i)
       assert(tuple.getDouble("EXPR$3") == 8); //min(field_i)
       assert(tuple.getDouble("EXPR$4") == 11); //max(field_i)
-      assert(tuple.getDouble("EXPR$5") == 9.5D); //avg(field_i)
+      assert(tuple.getDouble("EXPR$5") == 10); //avg(field_i)
 
       sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce",
           "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), " +
-          "cast(avg(1.0 * field_i) as float) from collection1 where text='XXXX' group by str_s " +
+          "avg(field_i) from collection1 where text='XXXX' group by str_s " +
           "having ((sum(field_i) = 19) AND (min(field_i) = 8))");
 
       solrStream = new SolrStream(jetty.url, sParams);
@@ -1829,11 +1968,11 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase {
       assert(tuple.getDouble("EXPR$2") == 19); //sum(field_i)
       assert(tuple.getDouble("EXPR$3") == 8); //min(field_i)
       assert(tuple.getDouble("EXPR$4") == 11); //max(field_i)
-      assert(tuple.getDouble("EXPR$5") == 9.5D); //avg(field_i)
+      assert(tuple.getDouble("EXPR$5") == 10); //avg(field_i)
 
       sParams = mapParams(CommonParams.QT, "/sql", "numWorkers", "2", "aggregationMode", "map_reduce",
           "stmt", "select str_s, count(*), sum(field_i), min(field_i), max(field_i), " +
-          "cast(avg(1.0 * field_i) as float) from collection1 where text='XXXX' group by str_s " +
+          "avg(field_i) from collection1 where text='XXXX' group by str_s " +
           "having ((sum(field_i) = 19) AND (min(field_i) = 100))");
 
       solrStream = new SolrStream(jetty.url, sParams);
@@ -1933,6 +2072,45 @@ public class TestSQLHandler extends AbstractFullDistribZkTestBase {
     assertTrue(maxf == 10.0D);
     assertTrue(avgf == 5.5D);
 
+
+    //Test without cast on average int field
+    sParams = mapParams(CommonParams.QT, "/sql",
+        "stmt", "select count(*) as myCount, sum(a_i) as mySum, min(a_i) as myMin, max(a_i) as myMax, " +
+            "avg(a_i) as myAvg, sum(a_f), min(a_f), max(a_f), avg(a_f) from collection1");
+
+    solrStream = new SolrStream(jetty.url, sParams);
+
+    tuples = getTuples(solrStream);
+
+    assert(tuples.size() == 1);
+
+    //Test Long and Double Sums
+
+    tuple = tuples.get(0);
+
+    count = tuple.getDouble("myCount");
+    sumi = tuple.getDouble("mySum");
+    mini = tuple.getDouble("myMin");
+    maxi = tuple.getDouble("myMax");
+    avgi = tuple.getDouble("myAvg");
+    assertTrue(tuple.get("myAvg") instanceof Long);
+    sumf = tuple.getDouble("EXPR$5"); //sum(a_f)
+    minf = tuple.getDouble("EXPR$6"); //min(a_f)
+    maxf = tuple.getDouble("EXPR$7"); //max(a_f)
+    avgf = tuple.getDouble("EXPR$8"); //avg(a_f)
+
+    assertTrue(count == 10);
+    assertTrue(mini == 0.0D);
+    assertTrue(maxi == 14.0D);
+    assertTrue(sumi == 70);
+    assertTrue(avgi == 7);
+    assertTrue(sumf == 55.0D);
+    assertTrue(minf == 1.0D);
+    assertTrue(maxf == 10.0D);
+    assertTrue(avgf == 5.5D);
+
+
+
     // Test where clause hits
     sParams = mapParams(CommonParams.QT, "/sql",
         "stmt", "select count(*), sum(a_i), min(a_i), max(a_i), cast(avg(1.0 * a_i) as float), sum(a_f), " +

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/aa2b46a6/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/FacetStream.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/FacetStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/FacetStream.java
index 94d937d..0180764 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/FacetStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/FacetStream.java
@@ -234,7 +234,6 @@ public class FacetStream extends TupleStream implements Expressible  {
     this.zkHost  = zkHost;
     this.params = params;
     this.buckets = buckets;
-    System.out.println("####### Bucket count:"+buckets.length);
     this.metrics = metrics;
     this.bucketSizeLimit   = bucketSizeLimit;
     this.collection = collection;
@@ -356,6 +355,7 @@ public class FacetStream extends TupleStream implements Expressible  {
       NamedList response = cloudSolrClient.request(request, collection);
       getTuples(response, buckets, metrics);
       Collections.sort(tuples, getStreamSort());
+
     } catch (Exception e) {
       throw new IOException(e);
     }
@@ -509,7 +509,11 @@ public class FacetStream extends TupleStream implements Expressible  {
           String identifier = metric.getIdentifier();
           if(!identifier.startsWith("count(")) {
             double d = (double)bucket.get("facet_"+m);
-            t.put(identifier, d);
+            if(metric.outputLong) {
+              t.put(identifier, Math.round(d));
+            } else {
+              t.put(identifier, d);
+            }
             ++m;
           } else {
             long l = ((Number)bucket.get("count")).longValue();

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/aa2b46a6/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/StatsStream.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/StatsStream.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/StatsStream.java
index 6538902..f6b5818 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/StatsStream.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/StatsStream.java
@@ -58,6 +58,7 @@ public class StatsStream extends TupleStream implements Expressible  {
   private String collection;
   private boolean done;
   private boolean doCount;
+  private Map<String, Metric> metricMap;
   protected transient SolrClientCache cache;
   protected transient CloudSolrClient cloudSolrClient;
 
@@ -82,6 +83,10 @@ public class StatsStream extends TupleStream implements Expressible  {
     this.params = params;
     this.metrics = metrics;
     this.collection = collection;
+    metricMap = new HashMap();
+    for(Metric metric : metrics) {
+      metricMap.put(metric.getIdentifier(), metric);
+    }
   }
 
   public StatsStream(StreamExpression expression, StreamFactory factory) throws IOException{   
@@ -321,7 +326,14 @@ public class StatsStream extends TupleStream implements Expressible  {
 
   private void addStat(Map<String, Object> map, String field, String stat, Object val) {
     if(stat.equals("mean")) {
-      map.put("avg("+field+")", val);
+      String name = "avg("+field+")";
+      Metric m = metricMap.get(name);
+      if(m.outputLong) {
+        Number num = (Number) val;
+        map.put(name, Math.round(num.doubleValue()));
+      } else {
+        map.put(name, val);
+      }
     } else {
       map.put(stat+"("+field+")", val);
     }

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/aa2b46a6/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/metrics/MeanMetric.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/metrics/MeanMetric.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/metrics/MeanMetric.java
index 03c037a..14f93b8 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/metrics/MeanMetric.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/metrics/MeanMetric.java
@@ -37,27 +37,36 @@ public class MeanMetric extends Metric {
   private long count;
 
   public MeanMetric(String columnName){
-    init("avg", columnName);
+    init("avg", columnName, false);
+  }
+
+  public MeanMetric(String columnName, boolean outputLong){
+    init("avg", columnName, outputLong);
   }
 
   public MeanMetric(StreamExpression expression, StreamFactory factory) throws IOException{
     // grab all parameters out
     String functionName = expression.getFunctionName();
     String columnName = factory.getValueOperand(expression, 0);
-    
+    String outputLong = factory.getValueOperand(expression, 1);
+
+
     // validate expression contains only what we want.
     if(null == columnName){
       throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - expected %s(columnName)", expression, functionName));
     }
-    if(1 != expression.getParameters().size()){
-      throw new IOException(String.format(Locale.ROOT,"Invalid expression %s - unknown operands found", expression));
+
+    boolean ol = false;
+    if(outputLong != null) {
+      ol = Boolean.parseBoolean(outputLong);
     }
     
-    init(functionName, columnName);    
+    init(functionName, columnName, ol);
   }
   
-  private void init(String functionName, String columnName){
+  private void init(String functionName, String columnName, boolean outputLong){
     this.columnName = columnName;
+    this.outputLong = outputLong;
     setFunctionName(functionName);
     setIdentifier(functionName, "(", columnName, ")");
   }
@@ -75,25 +84,29 @@ public class MeanMetric extends Metric {
   }
 
   public Metric newInstance() {
-    return new MeanMetric(columnName);
+    return new MeanMetric(columnName, outputLong);
   }
 
   public String[] getColumns() {
     return new String[]{columnName};
   }
 
-  public Double getValue() {
+  public Number getValue() {
     double dcount = (double)count;
     if(longSum == 0) {
       return doubleSum/dcount;
-
     } else {
-      return longSum/dcount;
+      double mean = longSum/dcount;
+      if(outputLong) {
+        return Math.round(mean);
+      } else {
+        return mean;
+      }
     }
   }
   
   @Override
   public StreamExpressionParameter toExpression(StreamFactory factory) throws IOException {
-    return new StreamExpression(getFunctionName()).withParameter(columnName);
+    return new StreamExpression(getFunctionName()).withParameter(columnName).withParameter(Boolean.toString(outputLong));
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/aa2b46a6/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/metrics/Metric.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/metrics/Metric.java b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/metrics/Metric.java
index 582b54a..87f7852 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/metrics/Metric.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/metrics/Metric.java
@@ -30,6 +30,7 @@ public abstract class Metric implements Expressible {
   private UUID metricNodeId = UUID.randomUUID();
   private String functionName;
   private String identifier;
+  public boolean outputLong; // This is only used for SQL in facet mode.
 
   public String getFunctionName(){
     return functionName;

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/aa2b46a6/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionToExpessionTest.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionToExpessionTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionToExpessionTest.java
index 4ddf4ce..0a597b7 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionToExpessionTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamExpressionToExpessionTest.java
@@ -155,7 +155,7 @@ public class StreamExpressionToExpessionTest extends LuceneTestCase {
     assertTrue(expressionString.contains("sort=\"a_f asc, a_i asc\""));
     assertTrue(expressionString.contains("min(a_i)"));
     assertTrue(expressionString.contains("max(a_i)"));
-    assertTrue(expressionString.contains("avg(a_i)"));
+    assertTrue(expressionString.contains("avg(a_i,false)"));
     assertTrue(expressionString.contains("count(*)"));
     assertTrue(expressionString.contains("sum(a_i)"));
     
@@ -274,8 +274,8 @@ public class StreamExpressionToExpessionTest extends LuceneTestCase {
     assertTrue(expressionString.contains("min(a_f)"));
     assertTrue(expressionString.contains("max(a_i)"));
     assertTrue(expressionString.contains("max(a_f)"));
-    assertTrue(expressionString.contains("avg(a_i)"));
-    assertTrue(expressionString.contains("avg(a_f)"));
+    assertTrue(expressionString.contains("avg(a_i,false)"));
+    assertTrue(expressionString.contains("avg(a_f,false)"));
     assertTrue(expressionString.contains("count(*)"));
   }
   
@@ -427,7 +427,7 @@ public class StreamExpressionToExpessionTest extends LuceneTestCase {
     metric = new MeanMetric(StreamExpressionParser.parse("avg(foo)"), factory);
     expressionString = metric.toExpression(factory).toString();
     
-    assertEquals("avg(foo)", expressionString);
+    assertEquals("avg(foo,false)", expressionString);
   }
   
   @Test


[37/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-7383: Replace DIH 'rss' example with 'atom'. The rss example was broken for multiple reasons. The atom example showcases the same - and more - features and uses the smallest config file needed to make it work.

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/solrconfig.xml
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/solrconfig.xml b/solr/example/example-DIH/solr/rss/conf/solrconfig.xml
deleted file mode 100644
index eae60a7..0000000
--- a/solr/example/example-DIH/solr/rss/conf/solrconfig.xml
+++ /dev/null
@@ -1,1396 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" ?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<!-- 
-     For more details about configurations options that may appear in
-     this file, see http://wiki.apache.org/solr/SolrConfigXml. 
--->
-<config>
-  <!-- In all configuration below, a prefix of "solr." for class names
-       is an alias that causes solr to search appropriate packages,
-       including org.apache.solr.(search|update|request|core|analysis)
-
-       You may also specify a fully qualified Java classname if you
-       have your own custom plugins.
-    -->
-
-  <!-- Controls what version of Lucene various components of Solr
-       adhere to.  Generally, you want to use the latest version to
-       get all bug fixes and improvements. It is highly recommended
-       that you fully re-index after changing this setting as it can
-       affect both how text is indexed and queried.
-  -->
-  <luceneMatchVersion>7.0.0</luceneMatchVersion>
-
-  <!-- <lib/> directives can be used to instruct Solr to load any Jars
-       identified and use them to resolve any "plugins" specified in
-       your solrconfig.xml or schema.xml (ie: Analyzers, Request
-       Handlers, etc...).
-
-       All directories and paths are resolved relative to the
-       instanceDir.
-
-       Please note that <lib/> directives are processed in the order
-       that they appear in your solrconfig.xml file, and are "stacked" 
-       on top of each other when building a ClassLoader - so if you have 
-       plugin jars with dependencies on other jars, the "lower level" 
-       dependency jars should be loaded first.
-
-       If a "./lib" directory exists in your instanceDir, all files
-       found in it are included as if you had used the following
-       syntax...
-       
-              <lib dir="./lib" />
-    -->
-
-  <!-- A 'dir' option by itself adds any files found in the directory 
-       to the classpath, this is useful for including all jars in a
-       directory.
-
-       When a 'regex' is specified in addition to a 'dir', only the
-       files in that directory which completely match the regex
-       (anchored on both ends) will be included.
-
-       If a 'dir' option (with or without a regex) is used and nothing
-       is found that matches, a warning will be logged.
-
-       The examples below can be used to load some solr-contribs along 
-       with their external dependencies.
-    -->
-  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-dataimporthandler-.*\.jar" />
-
-  <lib dir="${solr.install.dir:../../../..}/contrib/extraction/lib" regex=".*\.jar" />
-  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-cell-\d.*\.jar" />
-
-  <lib dir="${solr.install.dir:../../../..}/contrib/langid/lib/" regex=".*\.jar" />
-  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-langid-\d.*\.jar" />
-
-  <lib dir="${solr.install.dir:../../../..}/contrib/velocity/lib" regex=".*\.jar" />
-  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-velocity-\d.*\.jar" />
-
-  <!-- an exact 'path' can be used instead of a 'dir' to specify a 
-       specific jar file.  This will cause a serious error to be logged 
-       if it can't be loaded.
-    -->
-  <!--
-     <lib path="../a-jar-that-does-not-exist.jar" /> 
-  -->
-  
-  <!-- Data Directory
-
-       Used to specify an alternate directory to hold all index data
-       other than the default ./data under the Solr home.  If
-       replication is in use, this should match the replication
-       configuration.
-    -->
-  <dataDir>${solr.data.dir:}</dataDir>
-
-
-  <!-- The DirectoryFactory to use for indexes.
-       
-       solr.StandardDirectoryFactory is filesystem
-       based and tries to pick the best implementation for the current
-       JVM and platform.  solr.NRTCachingDirectoryFactory, the default,
-       wraps solr.StandardDirectoryFactory and caches small files in memory
-       for better NRT performance.
-
-       One can force a particular implementation via solr.MMapDirectoryFactory,
-       solr.NIOFSDirectoryFactory, or solr.SimpleFSDirectoryFactory.
-
-       solr.RAMDirectoryFactory is memory based, not
-       persistent, and doesn't work with replication.
-    -->
-  <directoryFactory name="DirectoryFactory" 
-                    class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}"/>
-
-  <!-- The CodecFactory for defining the format of the inverted index.
-       The default implementation is SchemaCodecFactory, which is the official Lucene
-       index format, but hooks into the schema to provide per-field customization of
-       the postings lists and per-document values in the fieldType element
-       (postingsFormat/docValuesFormat). Note that most of the alternative implementations
-       are experimental, so if you choose to customize the index format, it's a good
-       idea to convert back to the official format e.g. via IndexWriter.addIndexes(IndexReader)
-       before upgrading to a newer version to avoid unnecessary reindexing.
-  -->
-  <codecFactory class="solr.SchemaCodecFactory"/>
-
-  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-       Index Config - These settings control low-level behavior of indexing
-       Most example settings here show the default value, but are commented
-       out, to more easily see where customizations have been made.
-       
-       Note: This replaces <indexDefaults> and <mainIndex> from older versions
-       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
-  <indexConfig>
-    <!-- maxFieldLength was removed in 4.0. To get similar behavior, include a 
-         LimitTokenCountFilterFactory in your fieldType definition. E.g. 
-     <filter class="solr.LimitTokenCountFilterFactory" maxTokenCount="10000"/>
-    -->
-    <!-- Maximum time to wait for a write lock (ms) for an IndexWriter. Default: 1000 -->
-    <!-- <writeLockTimeout>1000</writeLockTimeout>  -->
-
-    <!-- Expert: Enabling compound file will use less files for the index, 
-         using fewer file descriptors on the expense of performance decrease. 
-         Default in Lucene is "true". Default in Solr is "false" (since 3.6) -->
-    <!-- <useCompoundFile>false</useCompoundFile> -->
-
-    <!-- ramBufferSizeMB sets the amount of RAM that may be used by Lucene
-         indexing for buffering added documents and deletions before they are
-         flushed to the Directory.
-         maxBufferedDocs sets a limit on the number of documents buffered
-         before flushing.
-         If both ramBufferSizeMB and maxBufferedDocs is set, then
-         Lucene will flush based on whichever limit is hit first.
-         The default is 100 MB.  -->
-    <!-- <ramBufferSizeMB>100</ramBufferSizeMB> -->
-    <!-- <maxBufferedDocs>1000</maxBufferedDocs> -->
-
-    <!-- Expert: Merge Policy 
-         The Merge Policy in Lucene controls how merging of segments is done.
-         The default since Solr/Lucene 3.3 is TieredMergePolicy.
-         The default since Lucene 2.3 was the LogByteSizeMergePolicy,
-         Even older versions of Lucene used LogDocMergePolicy.
-      -->
-    <!--
-        <mergePolicyFactory class="solr.TieredMergePolicyFactory">
-          <int name="maxMergeAtOnce">10</int>
-          <int name="segmentsPerTier">10</int>
-        </mergePolicyFactory>
-     -->
-
-    <!-- Expert: Merge Scheduler
-         The Merge Scheduler in Lucene controls how merges are
-         performed.  The ConcurrentMergeScheduler (Lucene 2.3 default)
-         can perform merges in the background using separate threads.
-         The SerialMergeScheduler (Lucene 2.2 default) does not.
-     -->
-    <!-- 
-       <mergeScheduler class="org.apache.lucene.index.ConcurrentMergeScheduler"/>
-       -->
-
-    <!-- LockFactory 
-
-         This option specifies which Lucene LockFactory implementation
-         to use.
-      
-         single = SingleInstanceLockFactory - suggested for a
-                  read-only index or when there is no possibility of
-                  another process trying to modify the index.
-         native = NativeFSLockFactory - uses OS native file locking.
-                  Do not use when multiple solr webapps in the same
-                  JVM are attempting to share a single index.
-         simple = SimpleFSLockFactory  - uses a plain file for locking
-
-         Defaults: 'native' is default for Solr3.6 and later, otherwise
-                   'simple' is the default
-
-         More details on the nuances of each LockFactory...
-         http://wiki.apache.org/lucene-java/AvailableLockFactories
-    -->
-    <lockType>${solr.lock.type:native}</lockType>
-
-    <!-- Commit Deletion Policy
-         Custom deletion policies can be specified here. The class must
-         implement org.apache.lucene.index.IndexDeletionPolicy.
-
-         The default Solr IndexDeletionPolicy implementation supports
-         deleting index commit points on number of commits, age of
-         commit point and optimized status.
-         
-         The latest commit point should always be preserved regardless
-         of the criteria.
-    -->
-    <!-- 
-    <deletionPolicy class="solr.SolrDeletionPolicy">
-    -->
-      <!-- The number of commit points to be kept -->
-      <!-- <str name="maxCommitsToKeep">1</str> -->
-      <!-- The number of optimized commit points to be kept -->
-      <!-- <str name="maxOptimizedCommitsToKeep">0</str> -->
-      <!--
-          Delete all commit points once they have reached the given age.
-          Supports DateMathParser syntax e.g.
-        -->
-      <!--
-         <str name="maxCommitAge">30MINUTES</str>
-         <str name="maxCommitAge">1DAY</str>
-      -->
-    <!-- 
-    </deletionPolicy>
-    -->
-
-    <!-- Lucene Infostream
-       
-         To aid in advanced debugging, Lucene provides an "InfoStream"
-         of detailed information when indexing.
-
-         Setting the value to true will instruct the underlying Lucene
-         IndexWriter to write its info stream to solr's log. By default,
-         this is enabled here, and controlled through log4j.properties.
-      -->
-     <infoStream>true</infoStream>
-  </indexConfig>
-
-
-  <!-- JMX
-       
-       This example enables JMX if and only if an existing MBeanServer
-       is found, use this if you want to configure JMX through JVM
-       parameters. Remove this to disable exposing Solr configuration
-       and statistics to JMX.
-
-       For more details see http://wiki.apache.org/solr/SolrJmx
-    -->
-  <jmx />
-  <!-- If you want to connect to a particular server, specify the
-       agentId 
-    -->
-  <!-- <jmx agentId="myAgent" /> -->
-  <!-- If you want to start a new MBeanServer, specify the serviceUrl -->
-  <!-- <jmx serviceUrl="service:jmx:rmi:///jndi/rmi://localhost:9999/solr"/>
-    -->
-
-  <!-- The default high-performance update handler -->
-  <updateHandler class="solr.DirectUpdateHandler2">
-
-    <!-- Enables a transaction log, used for real-time get, durability, and
-         and solr cloud replica recovery.  The log can grow as big as
-         uncommitted changes to the index, so use of a hard autoCommit
-         is recommended (see below).
-         "dir" - the target directory for transaction logs, defaults to the
-                solr data directory.  --> 
-    <updateLog>
-      <str name="dir">${solr.ulog.dir:}</str>
-    </updateLog>
- 
-    <!-- AutoCommit
-
-         Perform a hard commit automatically under certain conditions.
-         Instead of enabling autoCommit, consider using "commitWithin"
-         when adding documents. 
-
-         http://wiki.apache.org/solr/UpdateXmlMessages
-
-         maxDocs - Maximum number of documents to add since the last
-                   commit before automatically triggering a new commit.
-
-         maxTime - Maximum amount of time in ms that is allowed to pass
-                   since a document was added before automatically
-                   triggering a new commit. 
-         openSearcher - if false, the commit causes recent index changes
-           to be flushed to stable storage, but does not cause a new
-           searcher to be opened to make those changes visible.
-
-         If the updateLog is enabled, then it's highly recommended to
-         have some sort of hard autoCommit to limit the log size.
-      -->
-     <autoCommit> 
-       <maxTime>${solr.autoCommit.maxTime:15000}</maxTime> 
-       <openSearcher>false</openSearcher> 
-     </autoCommit>
-
-    <!-- softAutoCommit is like autoCommit except it causes a
-         'soft' commit which only ensures that changes are visible
-         but does not ensure that data is synced to disk.  This is
-         faster and more near-realtime friendly than a hard commit.
-      -->
-
-     <autoSoftCommit> 
-       <maxTime>${solr.autoSoftCommit.maxTime:-1}</maxTime> 
-     </autoSoftCommit>
-
-    <!-- Update Related Event Listeners
-         
-         Various IndexWriter related events can trigger Listeners to
-         take actions.
-
-         postCommit - fired after every commit or optimize command
-         postOptimize - fired after every optimize command
-      -->
-    <!-- The RunExecutableListener executes an external command from a
-         hook such as postCommit or postOptimize.
-         
-         exe - the name of the executable to run
-         dir - dir to use as the current working directory. (default=".")
-         wait - the calling thread waits until the executable returns. 
-                (default="true")
-         args - the arguments to pass to the program.  (default is none)
-         env - environment variables to set.  (default is none)
-      -->
-    <!-- This example shows how RunExecutableListener could be used
-         with the script based replication...
-         http://wiki.apache.org/solr/CollectionDistribution
-      -->
-    <!--
-       <listener event="postCommit" class="solr.RunExecutableListener">
-         <str name="exe">solr/bin/snapshooter</str>
-         <str name="dir">.</str>
-         <bool name="wait">true</bool>
-         <arr name="args"> <str>arg1</str> <str>arg2</str> </arr>
-         <arr name="env"> <str>MYVAR=val1</str> </arr>
-       </listener>
-      -->
-
-  </updateHandler>
-  
-  <!-- IndexReaderFactory
-
-       Use the following format to specify a custom IndexReaderFactory,
-       which allows for alternate IndexReader implementations.
-
-       ** Experimental Feature **
-
-       Please note - Using a custom IndexReaderFactory may prevent
-       certain other features from working. The API to
-       IndexReaderFactory may change without warning or may even be
-       removed from future releases if the problems cannot be
-       resolved.
-
-
-       ** Features that may not work with custom IndexReaderFactory **
-
-       The ReplicationHandler assumes a disk-resident index. Using a
-       custom IndexReader implementation may cause incompatibility
-       with ReplicationHandler and may cause replication to not work
-       correctly. See SOLR-1366 for details.
-
-    -->
-  <!--
-  <indexReaderFactory name="IndexReaderFactory" class="package.class">
-    <str name="someArg">Some Value</str>
-  </indexReaderFactory >
-  -->
-
-  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-       Query section - these settings control query time things like caches
-       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
-  <query>
-    <!-- Max Boolean Clauses
-
-         Maximum number of clauses in each BooleanQuery,  an exception
-         is thrown if exceeded.
-
-         ** WARNING **
-         
-         This option actually modifies a global Lucene property that
-         will affect all SolrCores.  If multiple solrconfig.xml files
-         disagree on this property, the value at any given moment will
-         be based on the last SolrCore to be initialized.
-         
-      -->
-    <maxBooleanClauses>1024</maxBooleanClauses>
-
-
-    <!-- Solr Internal Query Caches
-
-         There are two implementations of cache available for Solr,
-         LRUCache, based on a synchronized LinkedHashMap, and
-         FastLRUCache, based on a ConcurrentHashMap.  
-
-         FastLRUCache has faster gets and slower puts in single
-         threaded operation and thus is generally faster than LRUCache
-         when the hit ratio of the cache is high (> 75%), and may be
-         faster under other scenarios on multi-cpu systems.
-    -->
-
-    <!-- Filter Cache
-
-         Cache used by SolrIndexSearcher for filters (DocSets),
-         unordered sets of *all* documents that match a query.  When a
-         new searcher is opened, its caches may be prepopulated or
-         "autowarmed" using data from caches in the old searcher.
-         autowarmCount is the number of items to prepopulate.  For
-         LRUCache, the autowarmed items will be the most recently
-         accessed items.
-
-         Parameters:
-           class - the SolrCache implementation LRUCache or
-               (LRUCache or FastLRUCache)
-           size - the maximum number of entries in the cache
-           initialSize - the initial capacity (number of entries) of
-               the cache.  (see java.util.HashMap)
-           autowarmCount - the number of entries to prepopulate from
-               and old cache.  
-      -->
-    <filterCache class="solr.FastLRUCache"
-                 size="512"
-                 initialSize="512"
-                 autowarmCount="0"/>
-
-    <!-- Query Result Cache
-         
-         Caches results of searches - ordered lists of document ids
-         (DocList) based on a query, a sort, and the range of documents requested.  
-      -->
-    <queryResultCache class="solr.LRUCache"
-                     size="512"
-                     initialSize="512"
-                     autowarmCount="0"/>
-   
-    <!-- Document Cache
-
-         Caches Lucene Document objects (the stored fields for each
-         document).  Since Lucene internal document ids are transient,
-         this cache will not be autowarmed.  
-      -->
-    <documentCache class="solr.LRUCache"
-                   size="512"
-                   initialSize="512"
-                   autowarmCount="0"/>
-    
-    <!-- custom cache currently used by block join --> 
-    <cache name="perSegFilter"
-      class="solr.search.LRUCache"
-      size="10"
-      initialSize="0"
-      autowarmCount="10"
-      regenerator="solr.NoOpRegenerator" />
-
-    <!-- Field Value Cache
-         
-         Cache used to hold field values that are quickly accessible
-         by document id.  The fieldValueCache is created by default
-         even if not configured here.
-      -->
-    <!--
-       <fieldValueCache class="solr.FastLRUCache"
-                        size="512"
-                        autowarmCount="128"
-                        showItems="32" />
-      -->
-
-    <!-- Custom Cache
-
-         Example of a generic cache.  These caches may be accessed by
-         name through SolrIndexSearcher.getCache(),cacheLookup(), and
-         cacheInsert().  The purpose is to enable easy caching of
-         user/application level data.  The regenerator argument should
-         be specified as an implementation of solr.CacheRegenerator 
-         if autowarming is desired.  
-      -->
-    <!--
-       <cache name="myUserCache"
-              class="solr.LRUCache"
-              size="4096"
-              initialSize="1024"
-              autowarmCount="1024"
-              regenerator="com.mycompany.MyRegenerator"
-              />
-      -->
-
-
-    <!-- Lazy Field Loading
-
-         If true, stored fields that are not requested will be loaded
-         lazily.  This can result in a significant speed improvement
-         if the usual case is to not load all stored fields,
-         especially if the skipped fields are large compressed text
-         fields.
-    -->
-    <enableLazyFieldLoading>true</enableLazyFieldLoading>
-
-   <!-- Use Filter For Sorted Query
-
-        A possible optimization that attempts to use a filter to
-        satisfy a search.  If the requested sort does not include
-        score, then the filterCache will be checked for a filter
-        matching the query. If found, the filter will be used as the
-        source of document ids, and then the sort will be applied to
-        that.
-
-        For most situations, this will not be useful unless you
-        frequently get the same search repeatedly with different sort
-        options, and none of them ever use "score"
-     -->
-   <!--
-      <useFilterForSortedQuery>true</useFilterForSortedQuery>
-     -->
-
-   <!-- Result Window Size
-
-        An optimization for use with the queryResultCache.  When a search
-        is requested, a superset of the requested number of document ids
-        are collected.  For example, if a search for a particular query
-        requests matching documents 10 through 19, and queryWindowSize is 50,
-        then documents 0 through 49 will be collected and cached.  Any further
-        requests in that range can be satisfied via the cache.  
-     -->
-   <queryResultWindowSize>20</queryResultWindowSize>
-
-   <!-- Maximum number of documents to cache for any entry in the
-        queryResultCache. 
-     -->
-   <queryResultMaxDocsCached>200</queryResultMaxDocsCached>
-
-   <!-- Query Related Event Listeners
-
-        Various IndexSearcher related events can trigger Listeners to
-        take actions.
-
-        newSearcher - fired whenever a new searcher is being prepared
-        and there is a current searcher handling requests (aka
-        registered).  It can be used to prime certain caches to
-        prevent long request times for certain requests.
-
-        firstSearcher - fired whenever a new searcher is being
-        prepared but there is no current registered searcher to handle
-        requests or to gain autowarming data from.
-
-        
-     -->
-    <!-- QuerySenderListener takes an array of NamedList and executes a
-         local query request for each NamedList in sequence. 
-      -->
-    <listener event="newSearcher" class="solr.QuerySenderListener">
-      <arr name="queries">
-        <!--
-           <lst><str name="q">solr</str><str name="sort">price asc</str></lst>
-           <lst><str name="q">rocks</str><str name="sort">weight asc</str></lst>
-          -->
-      </arr>
-    </listener>
-    <listener event="firstSearcher" class="solr.QuerySenderListener">
-      <arr name="queries">
-        <lst>
-          <str name="q">static firstSearcher warming in solrconfig.xml</str>
-        </lst>
-      </arr>
-    </listener>
-
-    <!-- Use Cold Searcher
-
-         If a search request comes in and there is no current
-         registered searcher, then immediately register the still
-         warming searcher and use it.  If "false" then all requests
-         will block until the first searcher is done warming.
-      -->
-    <useColdSearcher>false</useColdSearcher>
-
-  </query>
-
-
-  <!-- Request Dispatcher
-
-       This section contains instructions for how the SolrDispatchFilter
-       should behave when processing requests for this SolrCore.
-
-       handleSelect is a legacy option that affects the behavior of requests
-       such as /select?qt=XXX
-
-       handleSelect="true" will cause the SolrDispatchFilter to process
-       the request and dispatch the query to a handler specified by the 
-       "qt" param, assuming "/select" isn't already registered.
-
-       handleSelect="false" will cause the SolrDispatchFilter to
-       ignore "/select" requests, resulting in a 404 unless a handler
-       is explicitly registered with the name "/select"
-
-       handleSelect="true" is not recommended for new users, but is the default
-       for backwards compatibility
-    -->
-  <requestDispatcher handleSelect="false" >
-    <!-- Request Parsing
-
-         These settings indicate how Solr Requests may be parsed, and
-         what restrictions may be placed on the ContentStreams from
-         those requests
-
-         enableRemoteStreaming - enables use of the stream.file
-         and stream.url parameters for specifying remote streams.
-
-         multipartUploadLimitInKB - specifies the max size (in KiB) of
-         Multipart File Uploads that Solr will allow in a Request.
-         
-         formdataUploadLimitInKB - specifies the max size (in KiB) of
-         form data (application/x-www-form-urlencoded) sent via
-         POST. You can use POST to pass request parameters not
-         fitting into the URL.
-         
-         addHttpRequestToContext - if set to true, it will instruct
-         the requestParsers to include the original HttpServletRequest
-         object in the context map of the SolrQueryRequest under the 
-         key "httpRequest". It will not be used by any of the existing
-         Solr components, but may be useful when developing custom 
-         plugins.
-         
-         *** WARNING ***
-         The settings below authorize Solr to fetch remote files, You
-         should make sure your system has some authentication before
-         using enableRemoteStreaming="true"
-
-      --> 
-    <requestParsers enableRemoteStreaming="true" 
-                    multipartUploadLimitInKB="2048000"
-                    formdataUploadLimitInKB="2048"
-                    addHttpRequestToContext="false"/>
-
-    <!-- HTTP Caching
-
-         Set HTTP caching related parameters (for proxy caches and clients).
-
-         The options below instruct Solr not to output any HTTP Caching
-         related headers
-      -->
-    <httpCaching never304="true" />
-    <!-- If you include a <cacheControl> directive, it will be used to
-         generate a Cache-Control header (as well as an Expires header
-         if the value contains "max-age=")
-         
-         By default, no Cache-Control header is generated.
-         
-         You can use the <cacheControl> option even if you have set
-         never304="true"
-      -->
-    <!--
-       <httpCaching never304="true" >
-         <cacheControl>max-age=30, public</cacheControl> 
-       </httpCaching>
-      -->
-    <!-- To enable Solr to respond with automatically generated HTTP
-         Caching headers, and to response to Cache Validation requests
-         correctly, set the value of never304="false"
-         
-         This will cause Solr to generate Last-Modified and ETag
-         headers based on the properties of the Index.
-
-         The following options can also be specified to affect the
-         values of these headers...
-
-         lastModFrom - the default value is "openTime" which means the
-         Last-Modified value (and validation against If-Modified-Since
-         requests) will all be relative to when the current Searcher
-         was opened.  You can change it to lastModFrom="dirLastMod" if
-         you want the value to exactly correspond to when the physical
-         index was last modified.
-
-         etagSeed="..." is an option you can change to force the ETag
-         header (and validation against If-None-Match requests) to be
-         different even if the index has not changed (ie: when making
-         significant changes to your config file)
-
-         (lastModifiedFrom and etagSeed are both ignored if you use
-         the never304="true" option)
-      -->
-    <!--
-       <httpCaching lastModifiedFrom="openTime"
-                    etagSeed="Solr">
-         <cacheControl>max-age=30, public</cacheControl> 
-       </httpCaching>
-      -->
-  </requestDispatcher>
-
-  <!-- Request Handlers 
-
-       http://wiki.apache.org/solr/SolrRequestHandler
-
-       Incoming queries will be dispatched to a specific handler by name
-       based on the path specified in the request.
-
-       Legacy behavior: If the request path uses "/select" but no Request
-       Handler has that name, and if handleSelect="true" has been specified in
-       the requestDispatcher, then the Request Handler is dispatched based on
-       the qt parameter.  Handlers without a leading '/' are accessed this way
-       like so: http://host/app/[core/]select?qt=name  If no qt is
-       given, then the requestHandler that declares default="true" will be
-       used or the one named "standard".
-
-       If a Request Handler is declared with startup="lazy", then it will
-       not be initialized until the first request that uses it.
-
-    -->
-
-  <requestHandler name="/dataimport" class="solr.DataImportHandler">
-    <lst name="defaults">
-      <str name="config">rss-data-config.xml</str>
-    </lst>
-  </requestHandler>
-
-  <!-- SearchHandler
-
-       http://wiki.apache.org/solr/SearchHandler
-
-       For processing Search Queries, the primary Request Handler
-       provided with Solr is "SearchHandler" It delegates to a sequent
-       of SearchComponents (see below) and supports distributed
-       queries across multiple shards
-    -->
-  <requestHandler name="/select" class="solr.SearchHandler">
-    <!-- default values for query parameters can be specified, these
-         will be overridden by parameters in the request
-      -->
-     <lst name="defaults">
-       <str name="echoParams">explicit</str>
-       <int name="rows">10</int>
-       <str name="df">text</str>
-     </lst>
-    <!-- In addition to defaults, "appends" params can be specified
-         to identify values which should be appended to the list of
-         multi-val params from the query (or the existing "defaults").
-      -->
-    <!-- In this example, the param "fq=instock:true" would be appended to
-         any query time fq params the user may specify, as a mechanism for
-         partitioning the index, independent of any user selected filtering
-         that may also be desired (perhaps as a result of faceted searching).
-
-         NOTE: there is *absolutely* nothing a client can do to prevent these
-         "appends" values from being used, so don't use this mechanism
-         unless you are sure you always want it.
-      -->
-    <!--
-       <lst name="appends">
-         <str name="fq">inStock:true</str>
-       </lst>
-      -->
-    <!-- "invariants" are a way of letting the Solr maintainer lock down
-         the options available to Solr clients.  Any params values
-         specified here are used regardless of what values may be specified
-         in either the query, the "defaults", or the "appends" params.
-
-         In this example, the facet.field and facet.query params would
-         be fixed, limiting the facets clients can use.  Faceting is
-         not turned on by default - but if the client does specify
-         facet=true in the request, these are the only facets they
-         will be able to see counts for; regardless of what other
-         facet.field or facet.query params they may specify.
-
-         NOTE: there is *absolutely* nothing a client can do to prevent these
-         "invariants" values from being used, so don't use this mechanism
-         unless you are sure you always want it.
-      -->
-    <!--
-       <lst name="invariants">
-         <str name="facet.field">cat</str>
-         <str name="facet.field">manu_exact</str>
-         <str name="facet.query">price:[* TO 500]</str>
-         <str name="facet.query">price:[500 TO *]</str>
-       </lst>
-      -->
-    <!-- If the default list of SearchComponents is not desired, that
-         list can either be overridden completely, or components can be
-         prepended or appended to the default list.  (see below)
-      -->
-    <!--
-       <arr name="components">
-         <str>nameOfCustomComponent1</str>
-         <str>nameOfCustomComponent2</str>
-       </arr>
-      -->
-    </requestHandler>
-
-  <!-- A request handler that returns indented JSON by default -->
-  <requestHandler name="/query" class="solr.SearchHandler">
-     <lst name="defaults">
-       <str name="echoParams">explicit</str>
-       <str name="wt">json</str>
-       <str name="indent">true</str>
-       <str name="df">text</str>
-     </lst>
-  </requestHandler>
-
-  <!-- A Robust Example
-
-       This example SearchHandler declaration shows off usage of the
-       SearchHandler with many defaults declared
-
-       Note that multiple instances of the same Request Handler
-       (SearchHandler) can be registered multiple times with different
-       names (and different init parameters)
-    -->
-  <requestHandler name="/browse" class="solr.SearchHandler">
-    <lst name="defaults">
-      <str name="echoParams">explicit</str>
-
-      <!-- VelocityResponseWriter settings -->
-      <str name="wt">velocity</str>
-      <str name="v.template">browse</str>
-      <str name="v.layout">layout</str>
-
-      <!-- Query settings -->
-      <str name="defType">edismax</str>
-      <str name="q.alt">*:*</str>
-      <str name="rows">10</str>
-      <str name="fl">*,score</str>
-
-      <!-- Faceting defaults -->
-      <str name="facet">on</str>
-      <str name="facet.mincount">1</str>
-    </lst>
-  </requestHandler>
-
-  <initParams path="/update/**,/query,/select,/tvrh,/elevate,/spell,/browse">
-    <lst name="defaults">
-      <str name="df">text</str>
-    </lst>
-  </initParams>
-
-  <!-- Solr Cell Update Request Handler
-
-       http://wiki.apache.org/solr/ExtractingRequestHandler 
-
-    -->
-  <requestHandler name="/update/extract" 
-                  startup="lazy"
-                  class="solr.extraction.ExtractingRequestHandler" >
-    <lst name="defaults">
-      <str name="lowernames">true</str>
-      <str name="uprefix">ignored_</str>
-
-      <!-- capture link hrefs but ignore div attributes -->
-      <str name="captureAttr">true</str>
-      <str name="fmap.a">links</str>
-      <str name="fmap.div">ignored_</str>
-    </lst>
-  </requestHandler>
-
-  <!-- Search Components
-
-       Search components are registered to SolrCore and used by 
-       instances of SearchHandler (which can access them by name)
-       
-       By default, the following components are available:
-       
-       <searchComponent name="query"     class="solr.QueryComponent" />
-       <searchComponent name="facet"     class="solr.FacetComponent" />
-       <searchComponent name="mlt"       class="solr.MoreLikeThisComponent" />
-       <searchComponent name="highlight" class="solr.HighlightComponent" />
-       <searchComponent name="stats"     class="solr.StatsComponent" />
-       <searchComponent name="debug"     class="solr.DebugComponent" />
-   
-       Default configuration in a requestHandler would look like:
-
-       <arr name="components">
-         <str>query</str>
-         <str>facet</str>
-         <str>mlt</str>
-         <str>highlight</str>
-         <str>stats</str>
-         <str>debug</str>
-       </arr>
-
-       If you register a searchComponent to one of the standard names, 
-       that will be used instead of the default.
-
-       To insert components before or after the 'standard' components, use:
-    
-       <arr name="first-components">
-         <str>myFirstComponentName</str>
-       </arr>
-    
-       <arr name="last-components">
-         <str>myLastComponentName</str>
-       </arr>
-
-       NOTE: The component registered with the name "debug" will
-       always be executed after the "last-components" 
-       
-     -->
-  
-   <!-- Spell Check
-
-        The spell check component can return a list of alternative spelling
-        suggestions.  
-
-        http://wiki.apache.org/solr/SpellCheckComponent
-     -->
-  <searchComponent name="spellcheck" class="solr.SpellCheckComponent">
-
-    <str name="queryAnalyzerFieldType">text_general</str>
-
-    <!-- Multiple "Spell Checkers" can be declared and used by this
-         component
-      -->
-
-    <!-- a spellchecker built from a field of the main index -->
-    <lst name="spellchecker">
-      <str name="name">default</str>
-      <str name="field">text</str>
-      <str name="classname">solr.DirectSolrSpellChecker</str>
-      <!-- the spellcheck distance measure used, the default is the internal levenshtein -->
-      <str name="distanceMeasure">internal</str>
-      <!-- minimum accuracy needed to be considered a valid spellcheck suggestion -->
-      <float name="accuracy">0.5</float>
-      <!-- the maximum #edits we consider when enumerating terms: can be 1 or 2 -->
-      <int name="maxEdits">2</int>
-      <!-- the minimum shared prefix when enumerating terms -->
-      <int name="minPrefix">1</int>
-      <!-- maximum number of inspections per result. -->
-      <int name="maxInspections">5</int>
-      <!-- minimum length of a query term to be considered for correction -->
-      <int name="minQueryLength">4</int>
-      <!-- maximum threshold of documents a query term can appear to be considered for correction -->
-      <float name="maxQueryFrequency">0.01</float>
-      <!-- uncomment this to require suggestions to occur in 1% of the documents
-        <float name="thresholdTokenFrequency">.01</float>
-      -->
-    </lst>
-    
-    <!-- a spellchecker that can break or combine words.  See "/spell" handler below for usage -->
-    <lst name="spellchecker">
-      <str name="name">wordbreak</str>
-      <str name="classname">solr.WordBreakSolrSpellChecker</str>      
-      <str name="field">name</str>
-      <str name="combineWords">true</str>
-      <str name="breakWords">true</str>
-      <int name="maxChanges">10</int>
-    </lst>
-
-    <!-- a spellchecker that uses a different distance measure -->
-    <!--
-       <lst name="spellchecker">
-         <str name="name">jarowinkler</str>
-         <str name="field">spell</str>
-         <str name="classname">solr.DirectSolrSpellChecker</str>
-         <str name="distanceMeasure">
-           org.apache.lucene.search.spell.JaroWinklerDistance
-         </str>
-       </lst>
-     -->
-
-    <!-- a spellchecker that use an alternate comparator 
-
-         comparatorClass be one of:
-          1. score (default)
-          2. freq (Frequency first, then score)
-          3. A fully qualified class name
-      -->
-    <!--
-       <lst name="spellchecker">
-         <str name="name">freq</str>
-         <str name="field">lowerfilt</str>
-         <str name="classname">solr.DirectSolrSpellChecker</str>
-         <str name="comparatorClass">freq</str>
-      -->
-
-    <!-- A spellchecker that reads the list of words from a file -->
-    <!--
-       <lst name="spellchecker">
-         <str name="classname">solr.FileBasedSpellChecker</str>
-         <str name="name">file</str>
-         <str name="sourceLocation">spellings.txt</str>
-         <str name="characterEncoding">UTF-8</str>
-         <str name="spellcheckIndexDir">spellcheckerFile</str>
-       </lst>
-      -->
-  </searchComponent>
-  
-  <!-- A request handler for demonstrating the spellcheck component.  
-
-       NOTE: This is purely as an example.  The whole purpose of the
-       SpellCheckComponent is to hook it into the request handler that
-       handles your normal user queries so that a separate request is
-       not needed to get suggestions.
-
-       IN OTHER WORDS, THERE IS REALLY GOOD CHANCE THE SETUP BELOW IS
-       NOT WHAT YOU WANT FOR YOUR PRODUCTION SYSTEM!
-       
-       See http://wiki.apache.org/solr/SpellCheckComponent for details
-       on the request parameters.
-    -->
-  <requestHandler name="/spell" class="solr.SearchHandler" startup="lazy">
-    <lst name="defaults">
-      <str name="df">text</str>
-      <!-- Solr will use suggestions from both the 'default' spellchecker
-           and from the 'wordbreak' spellchecker and combine them.
-           collations (re-written queries) can include a combination of
-           corrections from both spellcheckers -->
-      <str name="spellcheck.dictionary">default</str>
-      <str name="spellcheck.dictionary">wordbreak</str>
-      <str name="spellcheck">on</str>
-      <str name="spellcheck.extendedResults">true</str>       
-      <str name="spellcheck.count">10</str>
-      <str name="spellcheck.alternativeTermCount">5</str>
-      <str name="spellcheck.maxResultsForSuggest">5</str>       
-      <str name="spellcheck.collate">true</str>
-      <str name="spellcheck.collateExtendedResults">true</str>  
-      <str name="spellcheck.maxCollationTries">10</str>
-      <str name="spellcheck.maxCollations">5</str>         
-    </lst>
-    <arr name="last-components">
-      <str>spellcheck</str>
-    </arr>
-  </requestHandler>
-
-  <searchComponent name="suggest" class="solr.SuggestComponent">
-    <lst name="suggester">
-      <str name="name">mySuggester</str>
-      <str name="lookupImpl">FuzzyLookupFactory</str>      <!-- org.apache.solr.spelling.suggest.fst -->
-      <str name="dictionaryImpl">DocumentDictionaryFactory</str>     <!-- org.apache.solr.spelling.suggest.HighFrequencyDictionaryFactory --> 
-      <str name="field">cat</str>
-      <str name="weightField">price</str>
-      <str name="suggestAnalyzerFieldType">string</str>
-    </lst>
-  </searchComponent>
-
-  <requestHandler name="/suggest" class="solr.SearchHandler" startup="lazy">
-    <lst name="defaults">
-      <str name="suggest">true</str>
-      <str name="suggest.count">10</str>
-    </lst>
-    <arr name="components">
-      <str>suggest</str>
-    </arr>
-  </requestHandler>
-  <!-- Term Vector Component
-
-       http://wiki.apache.org/solr/TermVectorComponent
-    -->
-  <searchComponent name="tvComponent" class="solr.TermVectorComponent"/>
-
-  <!-- A request handler for demonstrating the term vector component
-
-       This is purely as an example.
-
-       In reality you will likely want to add the component to your 
-       already specified request handlers. 
-    -->
-  <requestHandler name="/tvrh" class="solr.SearchHandler" startup="lazy">
-    <lst name="defaults">
-      <str name="df">text</str>
-      <bool name="tv">true</bool>
-    </lst>
-    <arr name="last-components">
-      <str>tvComponent</str>
-    </arr>
-  </requestHandler>
-
-  <!-- Terms Component
-
-       http://wiki.apache.org/solr/TermsComponent
-
-       A component to return terms and document frequency of those
-       terms
-    -->
-  <searchComponent name="terms" class="solr.TermsComponent"/>
-
-  <!-- A request handler for demonstrating the terms component -->
-  <requestHandler name="/terms" class="solr.SearchHandler" startup="lazy">
-     <lst name="defaults">
-      <bool name="terms">true</bool>
-      <bool name="distrib">false</bool>
-    </lst>     
-    <arr name="components">
-      <str>terms</str>
-    </arr>
-  </requestHandler>
-
-
-  <!-- Query Elevation Component
-
-       http://wiki.apache.org/solr/QueryElevationComponent
-
-       a search component that enables you to configure the top
-       results for a given query regardless of the normal lucene
-       scoring.
-    -->
-  <searchComponent name="elevator" class="solr.QueryElevationComponent" >
-    <!-- pick a fieldType to analyze queries -->
-    <str name="queryFieldType">string</str>
-    <str name="config-file">elevate.xml</str>
-  </searchComponent>
-
-  <!-- A request handler for demonstrating the elevator component -->
-  <requestHandler name="/elevate" class="solr.SearchHandler" startup="lazy">
-    <lst name="defaults">
-      <str name="echoParams">explicit</str>
-      <str name="df">text</str>
-    </lst>
-    <arr name="last-components">
-      <str>elevator</str>
-    </arr>
-  </requestHandler>
-
-  <!-- Highlighting Component
-
-       http://wiki.apache.org/solr/HighlightingParameters
-    -->
-  <searchComponent class="solr.HighlightComponent" name="highlight">
-    <highlighting>
-      <!-- Configure the standard fragmenter -->
-      <!-- This could most likely be commented out in the "default" case -->
-      <fragmenter name="gap" 
-                  default="true"
-                  class="solr.highlight.GapFragmenter">
-        <lst name="defaults">
-          <int name="hl.fragsize">100</int>
-        </lst>
-      </fragmenter>
-
-      <!-- A regular-expression-based fragmenter 
-           (for sentence extraction) 
-        -->
-      <fragmenter name="regex" 
-                  class="solr.highlight.RegexFragmenter">
-        <lst name="defaults">
-          <!-- slightly smaller fragsizes work better because of slop -->
-          <int name="hl.fragsize">70</int>
-          <!-- allow 50% slop on fragment sizes -->
-          <float name="hl.regex.slop">0.5</float>
-          <!-- a basic sentence pattern -->
-          <str name="hl.regex.pattern">[-\w ,/\n\&quot;&apos;]{20,200}</str>
-        </lst>
-      </fragmenter>
-
-      <!-- Configure the standard formatter -->
-      <formatter name="html" 
-                 default="true"
-                 class="solr.highlight.HtmlFormatter">
-        <lst name="defaults">
-          <str name="hl.simple.pre"><![CDATA[<em>]]></str>
-          <str name="hl.simple.post"><![CDATA[</em>]]></str>
-        </lst>
-      </formatter>
-
-      <!-- Configure the standard encoder -->
-      <encoder name="html" 
-               class="solr.highlight.HtmlEncoder" />
-
-      <!-- Configure the standard fragListBuilder -->
-      <fragListBuilder name="simple" 
-                       class="solr.highlight.SimpleFragListBuilder"/>
-      
-      <!-- Configure the single fragListBuilder -->
-      <fragListBuilder name="single" 
-                       class="solr.highlight.SingleFragListBuilder"/>
-      
-      <!-- Configure the weighted fragListBuilder -->
-      <fragListBuilder name="weighted" 
-                       default="true"
-                       class="solr.highlight.WeightedFragListBuilder"/>
-      
-      <!-- default tag FragmentsBuilder -->
-      <fragmentsBuilder name="default" 
-                        default="true"
-                        class="solr.highlight.ScoreOrderFragmentsBuilder">
-        <!-- 
-        <lst name="defaults">
-          <str name="hl.multiValuedSeparatorChar">/</str>
-        </lst>
-        -->
-      </fragmentsBuilder>
-
-      <!-- multi-colored tag FragmentsBuilder -->
-      <fragmentsBuilder name="colored" 
-                        class="solr.highlight.ScoreOrderFragmentsBuilder">
-        <lst name="defaults">
-          <str name="hl.tag.pre"><![CDATA[
-               <b style="background:yellow">,<b style="background:lawgreen">,
-               <b style="background:aquamarine">,<b style="background:magenta">,
-               <b style="background:palegreen">,<b style="background:coral">,
-               <b style="background:wheat">,<b style="background:khaki">,
-               <b style="background:lime">,<b style="background:deepskyblue">]]></str>
-          <str name="hl.tag.post"><![CDATA[</b>]]></str>
-        </lst>
-      </fragmentsBuilder>
-      
-      <boundaryScanner name="default" 
-                       default="true"
-                       class="solr.highlight.SimpleBoundaryScanner">
-        <lst name="defaults">
-          <str name="hl.bs.maxScan">10</str>
-          <str name="hl.bs.chars">.,!? &#9;&#10;&#13;</str>
-        </lst>
-      </boundaryScanner>
-      
-      <boundaryScanner name="breakIterator" 
-                       class="solr.highlight.BreakIteratorBoundaryScanner">
-        <lst name="defaults">
-          <!-- type should be one of CHARACTER, WORD(default), LINE and SENTENCE -->
-          <str name="hl.bs.type">WORD</str>
-          <!-- language and country are used when constructing Locale object.  -->
-          <!-- And the Locale object will be used when getting instance of BreakIterator -->
-          <str name="hl.bs.language">en</str>
-          <str name="hl.bs.country">US</str>
-        </lst>
-      </boundaryScanner>
-    </highlighting>
-  </searchComponent>
-
-  <!-- Update Processors
-
-       Chains of Update Processor Factories for dealing with Update
-       Requests can be declared, and then used by name in Update
-       Request Processors
-
-       http://wiki.apache.org/solr/UpdateRequestProcessor
-
-    --> 
-  <!-- Deduplication
-
-       An example dedup update processor that creates the "id" field
-       on the fly based on the hash code of some other fields.  This
-       example has overwriteDupes set to false since we are using the
-       id field as the signatureField and Solr will maintain
-       uniqueness based on that anyway.  
-       
-    -->
-  <!--
-     <updateRequestProcessorChain name="dedupe">
-       <processor class="solr.processor.SignatureUpdateProcessorFactory">
-         <bool name="enabled">true</bool>
-         <str name="signatureField">id</str>
-         <bool name="overwriteDupes">false</bool>
-         <str name="fields">name,features,cat</str>
-         <str name="signatureClass">solr.processor.Lookup3Signature</str>
-       </processor>
-       <processor class="solr.LogUpdateProcessorFactory" />
-       <processor class="solr.RunUpdateProcessorFactory" />
-     </updateRequestProcessorChain>
-    -->
-  
-  <!-- Language identification
-
-       This example update chain identifies the language of the incoming
-       documents using the langid contrib. The detected language is
-       written to field language_s. No field name mapping is done.
-       The fields used for detection are text, title, subject and description,
-       making this example suitable for detecting languages form full-text
-       rich documents injected via ExtractingRequestHandler.
-       See more about langId at http://wiki.apache.org/solr/LanguageDetection
-    -->
-    <!--
-     <updateRequestProcessorChain name="langid">
-       <processor class="org.apache.solr.update.processor.TikaLanguageIdentifierUpdateProcessorFactory">
-         <str name="langid.fl">text,title,subject,description</str>
-         <str name="langid.langField">language_s</str>
-         <str name="langid.fallback">en</str>
-       </processor>
-       <processor class="solr.LogUpdateProcessorFactory" />
-       <processor class="solr.RunUpdateProcessorFactory" />
-     </updateRequestProcessorChain>
-    -->
-
-  <!-- Script update processor
-
-    This example hooks in an update processor implemented using JavaScript.
-
-    See more about the script update processor at http://wiki.apache.org/solr/ScriptUpdateProcessor
-  -->
-  <!--
-    <updateRequestProcessorChain name="script">
-      <processor class="solr.StatelessScriptUpdateProcessorFactory">
-        <str name="script">update-script.js</str>
-        <lst name="params">
-          <str name="config_param">example config parameter</str>
-        </lst>
-      </processor>
-      <processor class="solr.RunUpdateProcessorFactory" />
-    </updateRequestProcessorChain>
-  -->
- 
-  <!-- Response Writers
-
-       http://wiki.apache.org/solr/QueryResponseWriter
-
-       Request responses will be written using the writer specified by
-       the 'wt' request parameter matching the name of a registered
-       writer.
-
-       The "default" writer is the default and will be used if 'wt' is
-       not specified in the request.
-    -->
-  <!-- The following response writers are implicitly configured unless
-       overridden...
-    -->
-  <!--
-     <queryResponseWriter name="xml" 
-                          default="true"
-                          class="solr.XMLResponseWriter" />
-     <queryResponseWriter name="json" class="solr.JSONResponseWriter"/>
-     <queryResponseWriter name="python" class="solr.PythonResponseWriter"/>
-     <queryResponseWriter name="ruby" class="solr.RubyResponseWriter"/>
-     <queryResponseWriter name="php" class="solr.PHPResponseWriter"/>
-     <queryResponseWriter name="phps" class="solr.PHPSerializedResponseWriter"/>
-     <queryResponseWriter name="csv" class="solr.CSVResponseWriter"/>
-     <queryResponseWriter name="schema.xml" class="solr.SchemaXmlResponseWriter"/>
-    -->
-
-  <queryResponseWriter name="json" class="solr.JSONResponseWriter">
-     <!-- For the purposes of the tutorial, JSON responses are written as
-      plain text so that they are easy to read in *any* browser.
-      If you expect a MIME type of "application/json" just remove this override.
-     -->
-    <str name="content-type">text/plain; charset=UTF-8</str>
-  </queryResponseWriter>
-  
-  <!--
-     Custom response writers can be declared as needed...
-    -->
-  <queryResponseWriter name="velocity" class="solr.VelocityResponseWriter" startup="lazy">
-    <str name="template.base.dir">${velocity.template.base.dir:}</str>
-  </queryResponseWriter>
-
-  <!-- XSLT response writer transforms the XML output by any xslt file found
-       in Solr's conf/xslt directory.  Changes to xslt files are checked for
-       every xsltCacheLifetimeSeconds.  
-    -->
-  <queryResponseWriter name="xslt" class="solr.XSLTResponseWriter">
-    <int name="xsltCacheLifetimeSeconds">5</int>
-  </queryResponseWriter>
-
-  <!-- Query Parsers
-
-       https://cwiki.apache.org/confluence/display/solr/Query+Syntax+and+Parsing
-
-       Multiple QParserPlugins can be registered by name, and then
-       used in either the "defType" param for the QueryComponent (used
-       by SearchHandler) or in LocalParams
-    -->
-  <!-- example of registering a query parser -->
-  <!--
-     <queryParser name="myparser" class="com.mycompany.MyQParserPlugin"/>
-    -->
-
-  <!-- Function Parsers
-
-       http://wiki.apache.org/solr/FunctionQuery
-
-       Multiple ValueSourceParsers can be registered by name, and then
-       used as function names when using the "func" QParser.
-    -->
-  <!-- example of registering a custom function parser  -->
-  <!--
-     <valueSourceParser name="myfunc" 
-                        class="com.mycompany.MyValueSourceParser" />
-    -->
-    
-  
-  <!-- Document Transformers
-       http://wiki.apache.org/solr/DocTransformers
-    -->
-  <!--
-     Could be something like:
-     <transformer name="db" class="com.mycompany.LoadFromDatabaseTransformer" >
-       <int name="connection">jdbc://....</int>
-     </transformer>
-     
-     To add a constant value to all docs, use:
-     <transformer name="mytrans2" class="org.apache.solr.response.transform.ValueAugmenterFactory" >
-       <int name="value">5</int>
-     </transformer>
-     
-     If you want the user to still be able to change it with _value:something_ use this:
-     <transformer name="mytrans3" class="org.apache.solr.response.transform.ValueAugmenterFactory" >
-       <double name="defaultValue">5</double>
-     </transformer>
-
-      If you are using the QueryElevationComponent, you may wish to mark documents that get boosted.  The
-      EditorialMarkerFactory will do exactly that:
-     <transformer name="qecBooster" class="org.apache.solr.response.transform.EditorialMarkerFactory" />
-    -->
-
-</config>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/spellings.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/spellings.txt b/solr/example/example-DIH/solr/rss/conf/spellings.txt
deleted file mode 100644
index d7ede6f..0000000
--- a/solr/example/example-DIH/solr/rss/conf/spellings.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-pizza
-history
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/stopwords.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/stopwords.txt b/solr/example/example-DIH/solr/rss/conf/stopwords.txt
deleted file mode 100644
index ae1e83e..0000000
--- a/solr/example/example-DIH/solr/rss/conf/stopwords.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/synonyms.txt
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/synonyms.txt b/solr/example/example-DIH/solr/rss/conf/synonyms.txt
deleted file mode 100644
index eab4ee8..0000000
--- a/solr/example/example-DIH/solr/rss/conf/synonyms.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#-----------------------------------------------------------------------
-#some test synonym mappings unlikely to appear in real input text
-aaafoo => aaabar
-bbbfoo => bbbfoo bbbbar
-cccfoo => cccbar cccbaz
-fooaaa,baraaa,bazaaa
-
-# Some synonym groups specific to this example
-GB,gib,gigabyte,gigabytes
-MB,mib,megabyte,megabytes
-Television, Televisions, TV, TVs
-#notice we use "gib" instead of "GiB" so any WordDelimiterGraphFilter coming
-#after us won't split it into two words.
-
-# Synonym mappings can be used for spelling correction too
-pixima => pixma
-

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/update-script.js
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/update-script.js b/solr/example/example-DIH/solr/rss/conf/update-script.js
deleted file mode 100644
index 49b07f9..0000000
--- a/solr/example/example-DIH/solr/rss/conf/update-script.js
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
-  This is a basic skeleton JavaScript update processor.
-
-  In order for this to be executed, it must be properly wired into solrconfig.xml; by default it is commented out in
-  the example solrconfig.xml and must be uncommented to be enabled.
-
-  See http://wiki.apache.org/solr/ScriptUpdateProcessor for more details.
-*/
-
-function processAdd(cmd) {
-
-  doc = cmd.solrDoc;  // org.apache.solr.common.SolrInputDocument
-  id = doc.getFieldValue("id");
-  logger.info("update-script#processAdd: id=" + id);
-
-// Set a field value:
-//  doc.setField("foo_s", "whatever");
-
-// Get a configuration parameter:
-//  config_param = params.get('config_param');  // "params" only exists if processor configured with <lst name="params">
-
-// Get a request parameter:
-// some_param = req.getParams().get("some_param")
-
-// Add a field of field names that match a pattern:
-//   - Potentially useful to determine the fields/attributes represented in a result set, via faceting on field_name_ss
-//  field_names = doc.getFieldNames().toArray();
-//  for(i=0; i < field_names.length; i++) {
-//    field_name = field_names[i];
-//    if (/attr_.*/.test(field_name)) { doc.addField("attribute_ss", field_names[i]); }
-//  }
-
-}
-
-function processDelete(cmd) {
-  // no-op
-}
-
-function processMergeIndexes(cmd) {
-  // no-op
-}
-
-function processCommit(cmd) {
-  // no-op
-}
-
-function processRollback(cmd) {
-  // no-op
-}
-
-function finish() {
-  // no-op
-}

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/xslt/example.xsl
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/xslt/example.xsl b/solr/example/example-DIH/solr/rss/conf/xslt/example.xsl
deleted file mode 100644
index b899270..0000000
--- a/solr/example/example-DIH/solr/rss/conf/xslt/example.xsl
+++ /dev/null
@@ -1,132 +0,0 @@
-<?xml version='1.0' encoding='UTF-8'?>
-
-<!-- 
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- -->
-
-<!-- 
-  Simple transform of Solr query results to HTML
- -->
-<xsl:stylesheet version='1.0'
-    xmlns:xsl='http://www.w3.org/1999/XSL/Transform'
->
-
-  <xsl:output media-type="text/html" encoding="UTF-8"/> 
-  
-  <xsl:variable name="title" select="concat('Solr search results (',response/result/@numFound,' documents)')"/>
-  
-  <xsl:template match='/'>
-    <html>
-      <head>
-        <title><xsl:value-of select="$title"/></title>
-        <xsl:call-template name="css"/>
-      </head>
-      <body>
-        <h1><xsl:value-of select="$title"/></h1>
-        <div class="note">
-          This has been formatted by the sample "example.xsl" transform -
-          use your own XSLT to get a nicer page
-        </div>
-        <xsl:apply-templates select="response/result/doc"/>
-      </body>
-    </html>
-  </xsl:template>
-  
-  <xsl:template match="doc">
-    <xsl:variable name="pos" select="position()"/>
-    <div class="doc">
-      <table width="100%">
-        <xsl:apply-templates>
-          <xsl:with-param name="pos"><xsl:value-of select="$pos"/></xsl:with-param>
-        </xsl:apply-templates>
-      </table>
-    </div>
-  </xsl:template>
-
-  <xsl:template match="doc/*[@name='score']" priority="100">
-    <xsl:param name="pos"></xsl:param>
-    <tr>
-      <td class="name">
-        <xsl:value-of select="@name"/>
-      </td>
-      <td class="value">
-        <xsl:value-of select="."/>
-
-        <xsl:if test="boolean(//lst[@name='explain'])">
-          <xsl:element name="a">
-            <!-- can't allow whitespace here -->
-            <xsl:attribute name="href">javascript:toggle("<xsl:value-of select="concat('exp-',$pos)" />");</xsl:attribute>?</xsl:element>
-          <br/>
-          <xsl:element name="div">
-            <xsl:attribute name="class">exp</xsl:attribute>
-            <xsl:attribute name="id">
-              <xsl:value-of select="concat('exp-',$pos)" />
-            </xsl:attribute>
-            <xsl:value-of select="//lst[@name='explain']/str[position()=$pos]"/>
-          </xsl:element>
-        </xsl:if>
-      </td>
-    </tr>
-  </xsl:template>
-
-  <xsl:template match="doc/arr" priority="100">
-    <tr>
-      <td class="name">
-        <xsl:value-of select="@name"/>
-      </td>
-      <td class="value">
-        <ul>
-        <xsl:for-each select="*">
-          <li><xsl:value-of select="."/></li>
-        </xsl:for-each>
-        </ul>
-      </td>
-    </tr>
-  </xsl:template>
-
-
-  <xsl:template match="doc/*">
-    <tr>
-      <td class="name">
-        <xsl:value-of select="@name"/>
-      </td>
-      <td class="value">
-        <xsl:value-of select="."/>
-      </td>
-    </tr>
-  </xsl:template>
-
-  <xsl:template match="*"/>
-  
-  <xsl:template name="css">
-    <script>
-      function toggle(id) {
-        var obj = document.getElementById(id);
-        obj.style.display = (obj.style.display != 'block') ? 'block' : 'none';
-      }
-    </script>
-    <style type="text/css">
-      body { font-family: "Lucida Grande", sans-serif }
-      td.name { font-style: italic; font-size:80%; }
-      td { vertical-align: top; }
-      ul { margin: 0px; margin-left: 1em; padding: 0px; }
-      .note { font-size:80%; }
-      .doc { margin-top: 1em; border-top: solid grey 1px; }
-      .exp { display: none; font-family: monospace; white-space: pre; }
-    </style>
-  </xsl:template>
-
-</xsl:stylesheet>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/xslt/example_atom.xsl
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/xslt/example_atom.xsl b/solr/example/example-DIH/solr/rss/conf/xslt/example_atom.xsl
deleted file mode 100644
index b6c2315..0000000
--- a/solr/example/example-DIH/solr/rss/conf/xslt/example_atom.xsl
+++ /dev/null
@@ -1,67 +0,0 @@
-<?xml version='1.0' encoding='UTF-8'?>
-
-<!-- 
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- -->
-
-<!-- 
-  Simple transform of Solr query results to Atom
- -->
-
-<xsl:stylesheet version='1.0'
-    xmlns:xsl='http://www.w3.org/1999/XSL/Transform'>
-
-  <xsl:output
-       method="xml"
-       encoding="utf-8"
-       media-type="application/xml"
-  />
-
-  <xsl:template match='/'>
-    <xsl:variable name="query" select="response/lst[@name='responseHeader']/lst[@name='params']/str[@name='q']"/>
-    <feed xmlns="http://www.w3.org/2005/Atom">
-      <title>Example Solr Atom 1.0 Feed</title>
-      <subtitle>
-       This has been formatted by the sample "example_atom.xsl" transform -
-       use your own XSLT to get a nicer Atom feed.
-      </subtitle>
-      <author>
-        <name>Apache Solr</name>
-        <email>solr-user@lucene.apache.org</email>
-      </author>
-      <link rel="self" type="application/atom+xml" 
-            href="http://localhost:8983/solr/q={$query}&amp;wt=xslt&amp;tr=atom.xsl"/>
-      <updated>
-        <xsl:value-of select="response/result/doc[position()=1]/date[@name='timestamp']"/>
-      </updated>
-      <id>tag:localhost,2007:example</id>
-      <xsl:apply-templates select="response/result/doc"/>
-    </feed>
-  </xsl:template>
-    
-  <!-- search results xslt -->
-  <xsl:template match="doc">
-    <xsl:variable name="id" select="str[@name='id']"/>
-    <entry>
-      <title><xsl:value-of select="str[@name='name']"/></title>
-      <link href="http://localhost:8983/solr/select?q={$id}"/>
-      <id>tag:localhost,2007:<xsl:value-of select="$id"/></id>
-      <summary><xsl:value-of select="arr[@name='features']"/></summary>
-      <updated><xsl:value-of select="date[@name='timestamp']"/></updated>
-    </entry>
-  </xsl:template>
-
-</xsl:stylesheet>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/xslt/example_rss.xsl
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/xslt/example_rss.xsl b/solr/example/example-DIH/solr/rss/conf/xslt/example_rss.xsl
deleted file mode 100644
index c8ab5bf..0000000
--- a/solr/example/example-DIH/solr/rss/conf/xslt/example_rss.xsl
+++ /dev/null
@@ -1,66 +0,0 @@
-<?xml version='1.0' encoding='UTF-8'?>
-
-<!-- 
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- -->
-
-<!-- 
-  Simple transform of Solr query results to RSS
- -->
-
-<xsl:stylesheet version='1.0'
-    xmlns:xsl='http://www.w3.org/1999/XSL/Transform'>
-
-  <xsl:output
-       method="xml"
-       encoding="utf-8"
-       media-type="application/xml"
-  />
-  <xsl:template match='/'>
-    <rss version="2.0">
-       <channel>
-         <title>Example Solr RSS 2.0 Feed</title>
-         <link>http://localhost:8983/solr</link>
-         <description>
-          This has been formatted by the sample "example_rss.xsl" transform -
-          use your own XSLT to get a nicer RSS feed.
-         </description>
-         <language>en-us</language>
-         <docs>http://localhost:8983/solr</docs>
-         <xsl:apply-templates select="response/result/doc"/>
-       </channel>
-    </rss>
-  </xsl:template>
-  
-  <!-- search results xslt -->
-  <xsl:template match="doc">
-    <xsl:variable name="id" select="str[@name='id']"/>
-    <xsl:variable name="timestamp" select="date[@name='timestamp']"/>
-    <item>
-      <title><xsl:value-of select="str[@name='name']"/></title>
-      <link>
-        http://localhost:8983/solr/select?q=id:<xsl:value-of select="$id"/>
-      </link>
-      <description>
-        <xsl:value-of select="arr[@name='features']"/>
-      </description>
-      <pubDate><xsl:value-of select="$timestamp"/></pubDate>
-      <guid>
-        http://localhost:8983/solr/select?q=id:<xsl:value-of select="$id"/>
-      </guid>
-    </item>
-  </xsl:template>
-</xsl:stylesheet>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/xslt/luke.xsl
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/xslt/luke.xsl b/solr/example/example-DIH/solr/rss/conf/xslt/luke.xsl
deleted file mode 100644
index 05fb5bf..0000000
--- a/solr/example/example-DIH/solr/rss/conf/xslt/luke.xsl
+++ /dev/null
@@ -1,337 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-    
-    http://www.apache.org/licenses/LICENSE-2.0
-    
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
--->
-
-
-<!-- 
-  Display the luke request handler with graphs
- -->
-<xsl:stylesheet
-    xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
-    xmlns="http://www.w3.org/1999/xhtml"
-    version="1.0"
-    >
-    <xsl:output
-        method="html"
-        encoding="UTF-8"
-        media-type="text/html"
-        doctype-public="-//W3C//DTD XHTML 1.0 Strict//EN"
-        doctype-system="http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"
-    />
-
-    <xsl:variable name="title">Solr Luke Request Handler Response</xsl:variable>
-
-    <xsl:template match="/">
-        <html xmlns="http://www.w3.org/1999/xhtml">
-            <head>
-                <link rel="stylesheet" type="text/css" href="solr-admin.css"/>
-                <link rel="icon" href="favicon.ico" type="image/x-icon"/>
-                <link rel="shortcut icon" href="favicon.ico" type="image/x-icon"/>
-                <title>
-                    <xsl:value-of select="$title"/>
-                </title>
-                <xsl:call-template name="css"/>
-
-            </head>
-            <body>
-                <h1>
-                    <xsl:value-of select="$title"/>
-                </h1>
-                <div class="doc">
-                    <ul>
-                        <xsl:if test="response/lst[@name='index']">
-                            <li>
-                                <a href="#index">Index Statistics</a>
-                            </li>
-                        </xsl:if>
-                        <xsl:if test="response/lst[@name='fields']">
-                            <li>
-                                <a href="#fields">Field Statistics</a>
-                                <ul>
-                                    <xsl:for-each select="response/lst[@name='fields']/lst">
-                                        <li>
-                                            <a href="#{@name}">
-                                                <xsl:value-of select="@name"/>
-                                            </a>
-                                        </li>
-                                    </xsl:for-each>
-                                </ul>
-                            </li>
-                        </xsl:if>
-                        <xsl:if test="response/lst[@name='doc']">
-                            <li>
-                                <a href="#doc">Document statistics</a>
-                            </li>
-                        </xsl:if>
-                    </ul>
-                </div>
-                <xsl:if test="response/lst[@name='index']">
-                    <h2><a name="index"/>Index Statistics</h2>
-                    <xsl:apply-templates select="response/lst[@name='index']"/>
-                </xsl:if>
-                <xsl:if test="response/lst[@name='fields']">
-                    <h2><a name="fields"/>Field Statistics</h2>
-                    <xsl:apply-templates select="response/lst[@name='fields']"/>
-                </xsl:if>
-                <xsl:if test="response/lst[@name='doc']">
-                    <h2><a name="doc"/>Document statistics</h2>
-                    <xsl:apply-templates select="response/lst[@name='doc']"/>
-                </xsl:if>
-            </body>
-        </html>
-    </xsl:template>
-
-    <xsl:template match="lst">
-        <xsl:if test="parent::lst">
-            <tr>
-                <td colspan="2">
-                    <div class="doc">
-                        <xsl:call-template name="list"/>
-                    </div>
-                </td>
-            </tr>
-        </xsl:if>
-        <xsl:if test="not(parent::lst)">
-            <div class="doc">
-                <xsl:call-template name="list"/>
-            </div>
-        </xsl:if>
-    </xsl:template>
-
-    <xsl:template name="list">
-        <xsl:if test="count(child::*)>0">
-            <table>
-                <thead>
-                    <tr>
-                        <th colspan="2">
-                            <p>
-                                <a name="{@name}"/>
-                            </p>
-                            <xsl:value-of select="@name"/>
-                        </th>
-                    </tr>
-                </thead>
-                <tbody>
-                    <xsl:choose>
-                        <xsl:when
-                            test="@name='histogram'">
-                            <tr>
-                                <td colspan="2">
-                                    <xsl:call-template name="histogram"/>
-                                </td>
-                            </tr>
-                        </xsl:when>
-                        <xsl:otherwise>
-                            <xsl:apply-templates/>
-                        </xsl:otherwise>
-                    </xsl:choose>
-                </tbody>
-            </table>
-        </xsl:if>
-    </xsl:template>
-
-    <xsl:template name="histogram">
-        <div class="doc">
-            <xsl:call-template name="barchart">
-                <xsl:with-param name="max_bar_width">50</xsl:with-param>
-                <xsl:with-param name="iwidth">800</xsl:with-param>
-                <xsl:with-param name="iheight">160</xsl:with-param>
-                <xsl:with-param name="fill">blue</xsl:with-param>
-            </xsl:call-template>
-        </div>
-    </xsl:template>
-
-    <xsl:template name="barchart">
-        <xsl:param name="max_bar_width"/>
-        <xsl:param name="iwidth"/>
-        <xsl:param name="iheight"/>
-        <xsl:param name="fill"/>
-        <xsl:variable name="max">
-            <xsl:for-each select="int">
-                <xsl:sort data-type="number" order="descending"/>
-                <xsl:if test="position()=1">
-                    <xsl:value-of select="."/>
-                </xsl:if>
-            </xsl:for-each>
-        </xsl:variable>
-        <xsl:variable name="bars">
-           <xsl:value-of select="count(int)"/>
-        </xsl:variable>
-        <xsl:variable name="bar_width">
-           <xsl:choose>
-             <xsl:when test="$max_bar_width &lt; ($iwidth div $bars)">
-               <xsl:value-of select="$max_bar_width"/>
-             </xsl:when>
-             <xsl:otherwise>
-               <xsl:value-of select="$iwidth div $bars"/>
-             </xsl:otherwise>
-           </xsl:choose>
-        </xsl:variable>
-        <table class="histogram">
-           <tbody>
-              <tr>
-                <xsl:for-each select="int">
-                   <td>
-                 <xsl:value-of select="."/>
-                 <div class="histogram">
-                  <xsl:attribute name="style">background-color: <xsl:value-of select="$fill"/>; width: <xsl:value-of select="$bar_width"/>px; height: <xsl:value-of select="($iheight*number(.)) div $max"/>px;</xsl:attribute>
-                 </div>
-                   </td> 
-                </xsl:for-each>
-              </tr>
-              <tr>
-                <xsl:for-each select="int">
-                   <td>
-                       <xsl:value-of select="@name"/>
-                   </td>
-                </xsl:for-each>
-              </tr>
-           </tbody>
-        </table>
-    </xsl:template>
-
-    <xsl:template name="keyvalue">
-        <xsl:choose>
-            <xsl:when test="@name">
-                <tr>
-                    <td class="name">
-                        <xsl:value-of select="@name"/>
-                    </td>
-                    <td class="value">
-                        <xsl:value-of select="."/>
-                    </td>
-                </tr>
-            </xsl:when>
-            <xsl:otherwise>
-                <xsl:value-of select="."/>
-            </xsl:otherwise>
-        </xsl:choose>
-    </xsl:template>
-
-    <xsl:template match="int|bool|long|float|double|uuid|date">
-        <xsl:call-template name="keyvalue"/>
-    </xsl:template>
-
-    <xsl:template match="arr">
-        <tr>
-            <td class="name">
-                <xsl:value-of select="@name"/>
-            </td>
-            <td class="value">
-                <ul>
-                    <xsl:for-each select="child::*">
-                        <li>
-                            <xsl:apply-templates/>
-                        </li>
-                    </xsl:for-each>
-                </ul>
-            </td>
-        </tr>
-    </xsl:template>
-
-    <xsl:template match="str">
-        <xsl:choose>
-            <xsl:when test="@name='schema' or @name='index' or @name='flags'">
-                <xsl:call-template name="schema"/>
-            </xsl:when>
-            <xsl:otherwise>
-                <xsl:call-template name="keyvalue"/>
-            </xsl:otherwise>
-        </xsl:choose>
-    </xsl:template>
-
-    <xsl:template name="schema">
-        <tr>
-            <td class="name">
-                <xsl:value-of select="@name"/>
-            </td>
-            <td class="value">
-                <xsl:if test="contains(.,'unstored')">
-                    <xsl:value-of select="."/>
-                </xsl:if>
-                <xsl:if test="not(contains(.,'unstored'))">
-                    <xsl:call-template name="infochar2string">
-                        <xsl:with-param name="charList">
-                            <xsl:value-of select="."/>
-                        </xsl:with-param>
-                    </xsl:call-template>
-                </xsl:if>
-            </td>
-        </tr>
-    </xsl:template>
-
-    <xsl:template name="infochar2string">
-        <xsl:param name="i">1</xsl:param>
-        <xsl:param name="charList"/>
-
-        <xsl:variable name="char">
-            <xsl:value-of select="substring($charList,$i,1)"/>
-        </xsl:variable>
-        <xsl:choose>
-            <xsl:when test="$char='I'">
-                <xsl:value-of select="/response/lst[@name='info']/lst/str[@name='I']"/> - </xsl:when>
-            <xsl:when test="$char='T'">
-                <xsl:value-of select="/response/lst[@name='info']/lst/str[@name='T']"/> - </xsl:when>
-            <xsl:when test="$char='S'">
-                <xsl:value-of select="/response/lst[@name='info']/lst/str[@name='S']"/> - </xsl:when>
-            <xsl:when test="$char='M'">
-                <xsl:value-of select="/response/lst[@name='info']/lst/str[@name='M']"/> - </xsl:when>
-            <xsl:when test="$char='V'">
-                <xsl:value-of select="/response/lst[@name='info']/lst/str[@name='V']"/> - </xsl:when>
-            <xsl:when test="$char='o'">
-                <xsl:value-of select="/response/lst[@name='info']/lst/str[@name='o']"/> - </xsl:when>
-            <xsl:when test="$char='p'">
-                <xsl:value-of select="/response/lst[@name='info']/lst/str[@name='p']"/> - </xsl:when>
-            <xsl:when test="$char='O'">
-                <xsl:value-of select="/response/lst[@name='info']/lst/str[@name='O']"/> - </xsl:when>
-            <xsl:when test="$char='L'">
-                <xsl:value-of select="/response/lst[@name='info']/lst/str[@name='L']"/> - </xsl:when>
-            <xsl:when test="$char='B'">
-                <xsl:value-of select="/response/lst[@name='info']/lst/str[@name='B']"/> - </xsl:when>
-            <xsl:when test="$char='C'">
-                <xsl:value-of select="/response/lst[@name='info']/lst/str[@name='C']"/> - </xsl:when>
-            <xsl:when test="$char='f'">
-                <xsl:value-of select="/response/lst[@name='info']/lst/str[@name='f']"/> - </xsl:when>
-            <xsl:when test="$char='l'">
-                <xsl:value-of select="/response/lst[@name='info']/lst/str[@name='l']"/> -
-            </xsl:when>
-        </xsl:choose>
-
-        <xsl:if test="not($i>=string-length($charList))">
-            <xsl:call-template name="infochar2string">
-                <xsl:with-param name="i">
-                    <xsl:value-of select="$i+1"/>
-                </xsl:with-param>
-                <xsl:with-param name="charList">
-                    <xsl:value-of select="$charList"/>
-                </xsl:with-param>
-            </xsl:call-template>
-        </xsl:if>
-    </xsl:template>
-    <xsl:template name="css">
-        <style type="text/css">
-            <![CDATA[
-            td.name {font-style: italic; font-size:80%; }
-            .doc { margin: 0.5em; border: solid grey 1px; }
-            .exp { display: none; font-family: monospace; white-space: pre; }
-            div.histogram { background: none repeat scroll 0%; -moz-background-clip: -moz-initial; -moz-background-origin: -moz-initial; -moz-background-inline-policy: -moz-initial;}
-            table.histogram { width: auto; vertical-align: bottom; }
-            table.histogram td, table.histogram th { text-align: center; vertical-align: bottom; border-bottom: 1px solid #ff9933; width: auto; }
-            ]]>
-        </style>
-    </xsl:template>
-</xsl:stylesheet>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/conf/xslt/updateXml.xsl
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/xslt/updateXml.xsl b/solr/example/example-DIH/solr/rss/conf/xslt/updateXml.xsl
deleted file mode 100644
index a96e1d0..0000000
--- a/solr/example/example-DIH/solr/rss/conf/xslt/updateXml.xsl
+++ /dev/null
@@ -1,70 +0,0 @@
-<!-- 
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- -->
-
-<!--
-  Simple transform of Solr query response into Solr Update XML compliant XML.
-  When used in the xslt response writer you will get Update XML as output.
-  But you can also store a query response XML to disk and feed this XML to
-  the XSLTUpdateRequestHandler to index the content. Provided as example only.
-  See http://wiki.apache.org/solr/XsltUpdateRequestHandler for more info
- -->
-<xsl:stylesheet version='1.0' xmlns:xsl='http://www.w3.org/1999/XSL/Transform'>
-  <xsl:output media-type="text/xml" method="xml" indent="yes"/>
-
-  <xsl:template match='/'>
-    <add>
-        <xsl:apply-templates select="response/result/doc"/>
-    </add>
-  </xsl:template>
-  
-  <!-- Ignore score (makes no sense to index) -->
-  <xsl:template match="doc/*[@name='score']" priority="100">
-  </xsl:template>
-
-  <xsl:template match="doc">
-    <xsl:variable name="pos" select="position()"/>
-    <doc>
-        <xsl:apply-templates>
-          <xsl:with-param name="pos"><xsl:value-of select="$pos"/></xsl:with-param>
-        </xsl:apply-templates>
-    </doc>
-  </xsl:template>
-
-  <!-- Flatten arrays to duplicate field lines -->
-  <xsl:template match="doc/arr" priority="100">
-      <xsl:variable name="fn" select="@name"/>
-      
-      <xsl:for-each select="*">
-        <xsl:element name="field">
-          <xsl:attribute name="name"><xsl:value-of select="$fn"/></xsl:attribute>
-          <xsl:value-of select="."/>
-        </xsl:element>
-      </xsl:for-each>
-  </xsl:template>
-
-
-  <xsl:template match="doc/*">
-      <xsl:variable name="fn" select="@name"/>
-
-      <xsl:element name="field">
-        <xsl:attribute name="name"><xsl:value-of select="$fn"/></xsl:attribute>
-        <xsl:value-of select="."/>
-      </xsl:element>
-  </xsl:template>
-
-  <xsl:template match="*"/>
-</xsl:stylesheet>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/580f6e98/solr/example/example-DIH/solr/rss/core.properties
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/core.properties b/solr/example/example-DIH/solr/rss/core.properties
deleted file mode 100644
index e69de29..0000000


[24/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-10387: zkTransfer normalizes destination path incorrectly if source is a windows directory

Posted by ab...@apache.org.
SOLR-10387: zkTransfer normalizes destination path incorrectly if source is a windows directory


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/edcdc305
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/edcdc305
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/edcdc305

Branch: refs/heads/jira/solr-9959
Commit: edcdc3052ba95840593ace32d6d9a7a6e4ebe7ea
Parents: efdb04d
Author: Erick Erickson <er...@apache.org>
Authored: Wed Mar 29 21:13:40 2017 -0700
Committer: Erick Erickson <er...@apache.org>
Committed: Wed Mar 29 21:13:40 2017 -0700

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  3 ++
 .../apache/solr/cloud/SolrCLIZkUtilsTest.java   | 13 ++++---
 .../solr/common/cloud/ZkMaintenanceUtils.java   | 41 +++++++++++---------
 3 files changed, 32 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/edcdc305/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 3692bd5..840de48 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -148,6 +148,9 @@ Bug Fixes
 
 * SOLR-10369: bin\solr.cmd delete and healthcheck now works again; fixed continuation chars ^ (Luis Goes via janhoy)
 
+* SOLR-10387: zkTransfer normalizes destination path incorrectly if source is a windows directory 
+  (gopikannan venugopalsamy, Erick Erickson)
+
 Other Changes
 ----------------------
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/edcdc305/solr/core/src/test/org/apache/solr/cloud/SolrCLIZkUtilsTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/SolrCLIZkUtilsTest.java b/solr/core/src/test/org/apache/solr/cloud/SolrCLIZkUtilsTest.java
index 35ba1d4..776075e 100644
--- a/solr/core/src/test/org/apache/solr/cloud/SolrCLIZkUtilsTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/SolrCLIZkUtilsTest.java
@@ -18,6 +18,7 @@
 package org.apache.solr.cloud;
 
 import java.io.ByteArrayOutputStream;
+import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.nio.charset.Charset;
@@ -229,9 +230,9 @@ public class SolrCLIZkUtilsTest extends SolrCloudTestCase {
     // NOTE: really can't test copying to '.' because the test framework doesn't allow altering the source tree
     // and at least IntelliJ's CWD is in the source tree.
 
-    // copy to local ending in '/'
+    // copy to local ending in separator
     //src and cp3 and cp4 are valid
-    String localSlash = tmp.normalize() + "/cpToLocal/";
+    String localSlash = tmp.normalize() +  File.separator +"cpToLocal" + File.separator;
     args = new String[]{
         "-src", "zk:/cp3/schema.xml",
         "-dst", localSlash,
@@ -246,7 +247,7 @@ public class SolrCLIZkUtilsTest extends SolrCloudTestCase {
     // copy to ZK ending in '/'.
     //src and cp3 are valid
     args = new String[]{
-        "-src", "file:" + srcPathCheck.normalize().toAbsolutePath().toString() + "/solrconfig.xml",
+        "-src", "file:" + srcPathCheck.normalize().toAbsolutePath().toString() + File.separator + "solrconfig.xml",
         "-dst", "zk:/powerup/",
         "-recurse", "false",
         "-zkHost", zkAddr,
@@ -259,7 +260,7 @@ public class SolrCLIZkUtilsTest extends SolrCloudTestCase {
     // copy individual file up
     //src and cp3 are valid
     args = new String[]{
-        "-src", "file:" + srcPathCheck.normalize().toAbsolutePath().toString() + "/solrconfig.xml",
+        "-src", "file:" + srcPathCheck.normalize().toAbsolutePath().toString() + File.separator + "solrconfig.xml",
         "-dst", "zk:/copyUpFile.xml",
         "-recurse", "false",
         "-zkHost", zkAddr,
@@ -272,7 +273,7 @@ public class SolrCLIZkUtilsTest extends SolrCloudTestCase {
     // copy individual file down
     //src and cp3 are valid
 
-    String localNamed = tmp.normalize().toString() + "/localnamed/renamed.txt";
+    String localNamed = tmp.normalize().toString() + File.separator + "localnamed" + File.separator +  "renamed.txt";
     args = new String[]{
         "-src", "zk:/cp4/solrconfig.xml",
         "-dst", "file:" + localNamed,
@@ -404,7 +405,7 @@ public class SolrCLIZkUtilsTest extends SolrCloudTestCase {
     // Files are in mv2
     // Now fail if we specify "file:". Everything should still be in /mv2
     args = new String[]{
-        "-src", "file:/mv2",
+        "-src", "file:" + File.separator + "mv2",
         "-dst", "/mv3",
         "-zkHost", zkAddr,
     };

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/edcdc305/solr/solrj/src/java/org/apache/solr/common/cloud/ZkMaintenanceUtils.java
----------------------------------------------------------------------
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkMaintenanceUtils.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkMaintenanceUtils.java
index f569ae3..dfdc8cf 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkMaintenanceUtils.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkMaintenanceUtils.java
@@ -119,33 +119,36 @@ public class ZkMaintenanceUtils {
         throw new SolrServerException("Local path " + Paths.get(src).toAbsolutePath() + " is a directory and recurse is false");
       }
     }
-    if (srcIsZk == false && dstIsZk == false) {
-      throw new SolrServerException("At least one of the source and dest parameters must be prefixed with 'zk:' ");
-    }
+
     if (dstIsZk && dst.length() == 0) {
       dst = "/"; // for consistency, one can copy from zk: and send to zk:/
     }
-    dst = normalizeDest(src, dst);
+    dst = normalizeDest(src, dst, srcIsZk, dstIsZk);
 
+    // ZK -> ZK copy.
     if (srcIsZk && dstIsZk) {
       traverseZkTree(zkClient, src, VISIT_ORDER.VISIT_PRE, new ZkCopier(zkClient, src, dst));
       return;
     }
+
+    //local -> ZK copy
     if (dstIsZk) {
       uploadToZK(zkClient, Paths.get(src), dst, null);
       return;
     }
 
-    // Copying individual files from ZK requires special handling since downloadFromZK assumes it's a directory.
+    // Copying individual files from ZK requires special handling since downloadFromZK assumes the node has children.
     // This is kind of a weak test for the notion of "directory" on Zookeeper.
+    // ZK -> local copy where ZK is a parent node
     if (zkClient.getChildren(src, null, true).size() > 0) {
       downloadFromZK(zkClient, src, Paths.get(dst));
       return;
     }
 
+    // Single file ZK -> local copy where ZK is a leaf node
     if (Files.isDirectory(Paths.get(dst))) {
-      if (dst.endsWith("/") == false) dst += "/";
-      dst = normalizeDest(src, dst);
+      if (dst.endsWith(File.separator) == false) dst += File.separator;
+      dst = normalizeDest(src, dst, srcIsZk, dstIsZk);
     }
     byte[] data = zkClient.getData(src, null, null, true);
     Path filename = Paths.get(dst);
@@ -154,31 +157,32 @@ public class ZkMaintenanceUtils {
     Files.write(filename, data);
   }
 
-  
-  private static String normalizeDest(String srcName, String dstName) {
+  // If the dest ends with a separator, it's a directory or non-leaf znode, so return the
+  // last element of the src to appended to the dstName.
+  private static String normalizeDest(String srcName, String dstName, boolean srcIsZk, boolean dstIsZk) {
     // Special handling for "."
     if (dstName.equals(".")) {
       return Paths.get(".").normalize().toAbsolutePath().toString();
     }
-    // Pull the last element of the src path and add it to the dst if the src does NOT end in a slash 
 
-    // If the source ends in a slash, do not append the last segment to the dest
-    
-    if (dstName.endsWith("/")) { // Dest is a directory.
-      int pos = srcName.lastIndexOf("/");
+    String dstSeparator = (dstIsZk) ? "/" : File.separator;
+    String srcSeparator = (srcIsZk) ? "/" : File.separator;
+
+    if (dstName.endsWith(dstSeparator)) { // Dest is a directory or non-leaf znode, append last element of the src path.
+      int pos = srcName.lastIndexOf(srcSeparator);
       if (pos < 0) {
         dstName += srcName;
       } else {
         dstName += srcName.substring(pos + 1);
       }
     }
-    
+
     log.info("copying from '{}' to '{}'", srcName, dstName);
     return dstName;
   }
 
   public static void moveZnode(SolrZkClient zkClient, String src, String dst) throws SolrServerException, KeeperException, InterruptedException {
-    String destName = normalizeDest(src, dst);
+    String destName = normalizeDest(src, dst, true, true);
 
     // Special handling if the source has no children, i.e. copying just a single file.
     if (zkClient.getChildren(src, null, true).size() == 0) {
@@ -384,12 +388,11 @@ public class ZkMaintenanceUtils {
     }
   }
 
-  // Take into account Windows file separaters when making a Znode's name.
+  // Take into account Windows file separators when making a Znode's name.
   public static String createZkNodeName(String zkRoot, Path root, Path file) {
     String relativePath = root.relativize(file).toString();
     // Windows shenanigans
-    String separator = root.getFileSystem().getSeparator();
-    if ("\\".equals(separator))
+    if ("\\".equals(File.separator))
       relativePath = relativePath.replaceAll("\\\\", "/");
     // It's possible that the relative path and file are the same, in which case
     // adding the bare slash is A Bad Idea unless it's a non-leaf data node


[19/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-9993: Add support for ExpandComponent with PointFields

Posted by ab...@apache.org.
SOLR-9993: Add support for ExpandComponent with PointFields


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/9b7c8d56
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/9b7c8d56
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/9b7c8d56

Branch: refs/heads/jira/solr-9959
Commit: 9b7c8d5606cdd88725c2779389bfb20e1d01cb49
Parents: deddc9b
Author: Cao Manh Dat <da...@apache.org>
Authored: Wed Mar 29 13:52:51 2017 +0700
Committer: Cao Manh Dat <da...@apache.org>
Committed: Wed Mar 29 13:52:51 2017 +0700

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  2 +
 .../solr/handler/component/ExpandComponent.java | 75 +++++++++++++-------
 .../handler/component/TestExpandComponent.java  |  2 -
 3 files changed, 53 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9b7c8d56/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 4e63926..80c2aa0 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -128,6 +128,8 @@ New Features
 
 * SOLR-10349: Add totalTermFreq support to TermsComponent. (Shai Erera)
 
+* SOLR-9993: Add support for ExpandComponent with PointFields. (Cao Manh Dat)
+
 Optimizations
 ----------------------
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9b7c8d56/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java b/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
index f6ecd8d..656ac71 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
@@ -73,7 +73,12 @@ import org.apache.solr.common.util.SimpleOrderedMap;
 import org.apache.solr.core.PluginInfo;
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.schema.DoublePointField;
 import org.apache.solr.schema.FieldType;
+import org.apache.solr.schema.FloatPointField;
+import org.apache.solr.schema.IntPointField;
+import org.apache.solr.schema.LongPointField;
+import org.apache.solr.schema.SchemaField;
 import org.apache.solr.schema.StrField;
 import org.apache.solr.schema.TrieDoubleField;
 import org.apache.solr.schema.TrieFloatField;
@@ -209,7 +214,8 @@ public class ExpandComponent extends SearchComponent implements PluginInfoInitia
     SolrIndexSearcher searcher = req.getSearcher();
     LeafReader reader = searcher.getSlowAtomicReader();
 
-    FieldType fieldType = searcher.getSchema().getField(field).getType();
+    SchemaField schemaField = searcher.getSchema().getField(field);
+    FieldType fieldType = schemaField.getType();
 
     SortedDocValues values = null;
     long nullValue = 0;
@@ -228,17 +234,18 @@ public class ExpandComponent extends SearchComponent implements PluginInfoInitia
       //Get the nullValue for the numeric collapse field
       String defaultValue = searcher.getSchema().getField(field).getDefaultValue();
       if(defaultValue != null) {
-        if(fieldType instanceof TrieIntField || fieldType instanceof TrieLongField) {
+        if(fieldType instanceof TrieIntField || fieldType instanceof TrieLongField ||
+            fieldType instanceof IntPointField || fieldType instanceof LongPointField) {
           nullValue = Long.parseLong(defaultValue);
-        } else if(fieldType instanceof TrieFloatField){
+        } else if(fieldType instanceof TrieFloatField || fieldType instanceof FloatPointField){
           nullValue = Float.floatToIntBits(Float.parseFloat(defaultValue));
-        } else if(fieldType instanceof TrieDoubleField){
+        } else if(fieldType instanceof TrieDoubleField || fieldType instanceof DoublePointField){
           nullValue = Double.doubleToLongBits(Double.parseDouble(defaultValue));
         }
       } else {
-        if(fieldType instanceof TrieFloatField){
+        if(fieldType instanceof TrieFloatField || fieldType instanceof FloatPointField){
           nullValue = Float.floatToIntBits(0.0f);
-        } else if(fieldType instanceof TrieDoubleField){
+        } else if(fieldType instanceof TrieDoubleField || fieldType instanceof DoublePointField){
           nullValue = Double.doubleToLongBits(0.0f);
         }
       }
@@ -369,7 +376,11 @@ public class ExpandComponent extends SearchComponent implements PluginInfoInitia
       }
 
       if(count > 0 && count < 200) {
-        groupQuery = getGroupQuery(field, fieldType, count, groupSet);
+        if (fieldType.isPointField()) {
+          groupQuery = getPointGroupQuery(schemaField, count, groupSet);
+        } else {
+          groupQuery = getGroupQuery(field, fieldType, count, groupSet);
+        }
       }
     }
 
@@ -442,13 +453,7 @@ public class ExpandComponent extends SearchComponent implements PluginInfoInitia
           String group = charsRef.toString();
           outMap.add(group, slice);
         } else {
-          if(fieldType instanceof TrieIntField || fieldType instanceof TrieLongField ) {
-            outMap.add(Long.toString(groupValue), slice);
-          } else if(fieldType instanceof TrieFloatField) {
-            outMap.add(Float.toString(Float.intBitsToFloat((int) groupValue)), slice);
-          } else if(fieldType instanceof TrieDoubleField) {
-            outMap.add(Double.toString(Double.longBitsToDouble(groupValue)), slice);
-          }
+          outMap.add(numericToString(fieldType, groupValue), slice);
         }
       }
     }
@@ -703,18 +708,10 @@ public class ExpandComponent extends SearchComponent implements PluginInfoInitia
     BytesRefBuilder term = new BytesRefBuilder();
     Iterator<LongCursor> it = groupSet.iterator();
     int index = -1;
-    String stringVal =  null;
+
     while (it.hasNext()) {
       LongCursor cursor = it.next();
-      if(ft instanceof TrieIntField || ft instanceof TrieLongField) {
-        stringVal = Long.toString(cursor.value);
-      } else {
-        if(ft instanceof TrieFloatField) {
-          stringVal = Float.toString(Float.intBitsToFloat((int)cursor.value));
-        } else {
-          stringVal = Double.toString(Double.longBitsToDouble(cursor.value));
-        }
-      }
+      String stringVal = numericToString(ft, cursor.value);
       ft.readableToIndexed(stringVal, term);
       bytesRefs[++index] = term.toBytesRef();
     }
@@ -722,6 +719,36 @@ public class ExpandComponent extends SearchComponent implements PluginInfoInitia
     return new SolrConstantScoreQuery(new QueryWrapperFilter(new TermInSetQuery(fname, bytesRefs)));
   }
 
+  private Query getPointGroupQuery(SchemaField sf,
+                                   int size,
+                                   LongHashSet groupSet) {
+
+    Iterator<LongCursor> it = groupSet.iterator();
+    List<String> values = new ArrayList<>(size);
+    FieldType ft = sf.getType();
+    while (it.hasNext()) {
+      LongCursor cursor = it.next();
+      values.add(numericToString(ft, cursor.value));
+    }
+
+    return new SolrConstantScoreQuery(new QueryWrapperFilter(sf.getType().getSetQuery(null, sf, values)));
+  }
+
+  private String numericToString(FieldType fieldType, long val) {
+    if (fieldType.getNumberType() != null) {
+      switch (fieldType.getNumberType()) {
+        case INTEGER:
+        case LONG:
+          return Long.toString(val);
+        case FLOAT:
+          return Float.toString(Float.intBitsToFloat((int)val));
+        case DOUBLE:
+          return Double.toString(Double.longBitsToDouble(val));
+      }
+    }
+    throw new IllegalArgumentException("FieldType must be INT,LONG,FLOAT,DOUBLE found " + fieldType);
+  }
+
   private Query getGroupQuery(String fname,
                               int size,
                               IntObjectHashMap<BytesRef> ordBytes) throws Exception {

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/9b7c8d56/solr/core/src/test/org/apache/solr/handler/component/TestExpandComponent.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/handler/component/TestExpandComponent.java b/solr/core/src/test/org/apache/solr/handler/component/TestExpandComponent.java
index 7baa5a9..d1906d5 100644
--- a/solr/core/src/test/org/apache/solr/handler/component/TestExpandComponent.java
+++ b/solr/core/src/test/org/apache/solr/handler/component/TestExpandComponent.java
@@ -21,14 +21,12 @@ import java.util.Collections;
 import java.util.List;
 
 import org.apache.solr.SolrTestCaseJ4;
-import org.apache.solr.SolrTestCaseJ4.SuppressPointFields;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.search.CollapsingQParserPlugin;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-@SuppressPointFields
 public class TestExpandComponent extends SolrTestCaseJ4 {
 
   @BeforeClass


[11/52] [abbrv] lucene-solr:jira/solr-9959: SOLR-10343: Update Solr default/example and test configs to use SynonymGraphFilterFactory

Posted by ab...@apache.org.
SOLR-10343: Update Solr default/example and test configs to use SynonymGraphFilterFactory


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/1a80e4d6
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/1a80e4d6
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/1a80e4d6

Branch: refs/heads/jira/solr-9959
Commit: 1a80e4d6942dd7af214c999e0e6540564efc02ac
Parents: 390ef9a
Author: Steve Rowe <sa...@gmail.com>
Authored: Tue Mar 28 11:47:02 2017 -0400
Committer: Steve Rowe <sa...@gmail.com>
Committed: Tue Mar 28 11:47:02 2017 -0400

----------------------------------------------------------------------
 solr/CHANGES.txt                                |  2 ++
 .../clustering/solr/collection1/conf/schema.xml |  8 ++---
 .../conf/dataimport-schema-no-unique-key.xml    |  4 +--
 .../conf/dataimport-solr_id-schema.xml          |  8 ++---
 .../extraction/solr/collection1/conf/schema.xml | 21 +++++++++---
 .../test-files/solr/collection1/conf/schema.xml |  2 +-
 .../uima/solr/collection1/conf/schema.xml       | 12 +++----
 .../test-files/uima/uima-tokenizers-schema.xml  |  2 +-
 .../conf/schema-HighlighterMaxOffsetTest.xml    |  4 +--
 .../collection1/conf/schema-copyfield-test.xml  | 21 +++++++++---
 .../collection1/conf/schema-docValuesJoin.xml   |  2 +-
 .../collection1/conf/schema-psuedo-fields.xml   |  2 +-
 .../collection1/conf/schema-required-fields.xml | 21 +++++++++---
 .../solr/collection1/conf/schema-rest.xml       | 34 ++++++++++++++------
 .../conf/schema-single-dynamic-copy-field.xml   | 34 ++++++++++++++------
 .../collection1/conf/schema-spellchecker.xml    |  2 +-
 .../conf/schema-synonym-tokenizer.xml           |  5 +--
 .../solr/collection1/conf/schema-trie.xml       |  8 ++---
 .../test-files/solr/collection1/conf/schema.xml | 25 ++++++++++----
 .../solr/collection1/conf/schema11.xml          | 12 +++----
 .../solr/collection1/conf/schema12.xml          | 29 ++++++++++++-----
 .../solr/collection1/conf/schema15.xml          | 29 ++++++++++++-----
 .../solr/collection1/conf/schema_latest.xml     | 20 ++++++------
 .../solr/collection1/conf/schemasurround.xml    | 31 ++++++++++++------
 .../example-DIH/solr/db/conf/managed-schema     | 20 ++++++------
 .../example-DIH/solr/mail/conf/managed-schema   | 21 ++++++------
 .../example-DIH/solr/rss/conf/managed-schema    | 24 +++++++-------
 .../example-DIH/solr/solr/conf/managed-schema   | 20 ++++++------
 solr/example/files/conf/managed-schema          | 12 +++----
 .../basic_configs/conf/managed-schema           | 20 ++++++------
 .../conf/managed-schema                         | 20 ++++++------
 .../conf/managed-schema                         | 20 ++++++------
 32 files changed, 317 insertions(+), 178 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/CHANGES.txt
----------------------------------------------------------------------
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index b68c62f..2d180a3 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -156,6 +156,8 @@ Other Changes
   
 * SOLR-10344: Update Solr default/example and test configs to use WordDelimiterGraphFilterFactory. (Steve Rowe)
 
+* SOLR-10343: Update Solr default/example and test configs to use SynonymGraphFilterFactory. (Steve Rowe)
+
 ==================  6.5.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/schema.xml
----------------------------------------------------------------------
diff --git a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/schema.xml b/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/schema.xml
index 02626a0..e9ef115 100644
--- a/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/schema.xml
+++ b/solr/contrib/clustering/src/test-files/clustering/solr/collection1/conf/schema.xml
@@ -166,7 +166,7 @@
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <!-- in this example, we will only use synonyms at query time
-      <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
       -->
       <!-- Case insensitive stop word removal.
       -->
@@ -184,7 +184,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <!--<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>-->
+      <!--<filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>-->
       <!--<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
       <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
@@ -201,7 +201,7 @@
   <fieldType name="textTight" class="solr.TextField" positionIncrementGap="100">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <!--<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+      <!--<filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
       <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
       <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
@@ -213,7 +213,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <!--<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+      <!--<filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
       <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
       <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/contrib/dataimporthandler-extras/src/test-files/dihextras/solr/collection1/conf/dataimport-schema-no-unique-key.xml
----------------------------------------------------------------------
diff --git a/solr/contrib/dataimporthandler-extras/src/test-files/dihextras/solr/collection1/conf/dataimport-schema-no-unique-key.xml b/solr/contrib/dataimporthandler-extras/src/test-files/dihextras/solr/collection1/conf/dataimport-schema-no-unique-key.xml
index 84d0dbe..5ef7eda 100644
--- a/solr/contrib/dataimporthandler-extras/src/test-files/dihextras/solr/collection1/conf/dataimport-schema-no-unique-key.xml
+++ b/solr/contrib/dataimporthandler-extras/src/test-files/dihextras/solr/collection1/conf/dataimport-schema-no-unique-key.xml
@@ -169,7 +169,7 @@
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <!-- in this example, we will only use synonyms at query time
-      <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
       -->
       <!--<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
       <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
@@ -182,7 +182,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <!--<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>-->
+      <!--<filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>-->
       <!--<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
       <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-solr_id-schema.xml
----------------------------------------------------------------------
diff --git a/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-solr_id-schema.xml b/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-solr_id-schema.xml
index da4bd39..a99a0b4 100644
--- a/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-solr_id-schema.xml
+++ b/solr/contrib/dataimporthandler/src/test-files/dih/solr/collection1/conf/dataimport-solr_id-schema.xml
@@ -167,7 +167,7 @@
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <!-- in this example, we will only use synonyms at query time
-      <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
       -->
       <!--<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
       <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
@@ -180,7 +180,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <!--<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>-->
+      <!--<filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>-->
       <!--<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
       <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
@@ -197,7 +197,7 @@
   <fieldType name="textTight" class="solr.TextField" positionIncrementGap="100">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <!--<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>-->
+      <!--<filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>-->
       <!--<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
       <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
@@ -209,7 +209,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <!--<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>-->
+      <!--<filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>-->
       <!--<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
       <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/contrib/extraction/src/test-files/extraction/solr/collection1/conf/schema.xml
----------------------------------------------------------------------
diff --git a/solr/contrib/extraction/src/test-files/extraction/solr/collection1/conf/schema.xml b/solr/contrib/extraction/src/test-files/extraction/solr/collection1/conf/schema.xml
index b743ed8..b65c345 100644
--- a/solr/contrib/extraction/src/test-files/extraction/solr/collection1/conf/schema.xml
+++ b/solr/contrib/extraction/src/test-files/extraction/solr/collection1/conf/schema.xml
@@ -301,9 +301,14 @@
 
   <!-- less flexible in matching skus, but less chance of a false match -->
   <fieldType name="syn" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt"/>
     </analyzer>
   </fieldType>
 
@@ -311,9 +316,17 @@
        synonyms "better"
     -->
   <fieldType name="dedup" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.SynonymGraphFilterFactory"
+              synonyms="synonyms.txt" expand="true"/>
+      <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory"
+      <filter class="solr.SynonymGraphFilterFactory"
               synonyms="synonyms.txt" expand="true"/>
       <filter class="solr.PorterStemFilterFactory"/>
       <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/contrib/ltr/src/test-files/solr/collection1/conf/schema.xml
----------------------------------------------------------------------
diff --git a/solr/contrib/ltr/src/test-files/solr/collection1/conf/schema.xml b/solr/contrib/ltr/src/test-files/solr/collection1/conf/schema.xml
index 0b958c0..f27f092 100644
--- a/solr/contrib/ltr/src/test-files/solr/collection1/conf/schema.xml
+++ b/solr/contrib/ltr/src/test-files/solr/collection1/conf/schema.xml
@@ -63,7 +63,7 @@
       <analyzer type="query">
         <tokenizer class="solr.StandardTokenizerFactory"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"  />
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.LowerCaseFilterFactory"/>
       </analyzer>
     </fieldType>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/contrib/uima/src/test-files/uima/solr/collection1/conf/schema.xml
----------------------------------------------------------------------
diff --git a/solr/contrib/uima/src/test-files/uima/solr/collection1/conf/schema.xml b/solr/contrib/uima/src/test-files/uima/solr/collection1/conf/schema.xml
index 89d44e6..f1aebb6 100644
--- a/solr/contrib/uima/src/test-files/uima/solr/collection1/conf/schema.xml
+++ b/solr/contrib/uima/src/test-files/uima/solr/collection1/conf/schema.xml
@@ -203,7 +203,7 @@
         <tokenizer class="solr.MockTokenizerFactory" />
         <!--
           in this example, we will only use synonyms at query time
-          <filter class="solr.SynonymFilterFactory"
+          <filter class="solr.SynonymGraphFilterFactory"
           synonyms="index_synonyms.txt" ignoreCase="true"
           expand="false"/>
         -->
@@ -220,7 +220,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.MockTokenizerFactory" />
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt"
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt"
           ignoreCase="true" expand="true" />
         <filter class="solr.StopFilterFactory" ignoreCase="true"
           words="stopwords.txt" />
@@ -242,7 +242,7 @@
       positionIncrementGap="100">
       <analyzer type="index">
         <tokenizer class="solr.MockTokenizerFactory" />
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt"
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt"
           ignoreCase="true" expand="false" />
         <filter class="solr.StopFilterFactory" ignoreCase="true"
           words="stopwords.txt" />
@@ -261,7 +261,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.MockTokenizerFactory" />
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt"
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt"
                 ignoreCase="true" expand="false" />
         <filter class="solr.StopFilterFactory" ignoreCase="true"
                 words="stopwords.txt" />
@@ -292,7 +292,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.MockTokenizerFactory" />
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt"
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt"
           ignoreCase="true" expand="true" />
         <filter class="solr.StopFilterFactory" ignoreCase="true"
           words="stopwords.txt" />
@@ -326,7 +326,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.MockTokenizerFactory" />
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt"
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt"
           ignoreCase="true" expand="true" />
         <filter class="solr.StopFilterFactory" ignoreCase="true"
           words="stopwords.txt" />

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/contrib/uima/src/test-files/uima/uima-tokenizers-schema.xml
----------------------------------------------------------------------
diff --git a/solr/contrib/uima/src/test-files/uima/uima-tokenizers-schema.xml b/solr/contrib/uima/src/test-files/uima/uima-tokenizers-schema.xml
index 229d69b..a6a4943 100644
--- a/solr/contrib/uima/src/test-files/uima/uima-tokenizers-schema.xml
+++ b/solr/contrib/uima/src/test-files/uima/uima-tokenizers-schema.xml
@@ -203,7 +203,7 @@
       <tokenizer class="solr.MockTokenizerFactory"/>
       <!--
         in this example, we will only use synonyms at query time
-        <filter class="solr.SynonymFilterFactory"
+        <filter class="solr.SynonymGraphFilterFactory"
         synonyms="index_synonyms.txt" ignoreCase="true"
         expand="false"/>
       -->

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/core/src/test-files/solr/collection1/conf/schema-HighlighterMaxOffsetTest.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-HighlighterMaxOffsetTest.xml b/solr/core/src/test-files/solr/collection1/conf/schema-HighlighterMaxOffsetTest.xml
index 122d4ce..fce717c 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema-HighlighterMaxOffsetTest.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-HighlighterMaxOffsetTest.xml
@@ -57,7 +57,7 @@ Test for HighlighterMaxOffsetTest which requires the use of ReversedWildcardFilt
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.StandardTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
@@ -76,7 +76,7 @@ Test for HighlighterMaxOffsetTest which requires the use of ReversedWildcardFilt
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/core/src/test-files/solr/collection1/conf/schema-copyfield-test.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-copyfield-test.xml b/solr/core/src/test-files/solr/collection1/conf/schema-copyfield-test.xml
index a9df7f8..665114a 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema-copyfield-test.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-copyfield-test.xml
@@ -290,9 +290,14 @@
 
   <!-- less flexible in matching skus, but less chance of a false match -->
   <fieldType name="syn" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt"/>
     </analyzer>
   </fieldType>
 
@@ -300,9 +305,17 @@
        synonyms "better"
     -->
   <fieldType name="dedup" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.SynonymGraphFilterFactory"
+              synonyms="synonyms.txt" expand="true"/>
+      <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory"
+      <filter class="solr.SynonymGraphFilterFactory"
               synonyms="synonyms.txt" expand="true"/>
       <filter class="solr.PorterStemFilterFactory"/>
       <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/core/src/test-files/solr/collection1/conf/schema-docValuesJoin.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-docValuesJoin.xml b/solr/core/src/test-files/solr/collection1/conf/schema-docValuesJoin.xml
index baed872..d152d98 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema-docValuesJoin.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-docValuesJoin.xml
@@ -51,7 +51,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.StopFilterFactory"
               ignoreCase="true"
               words="stopwords.txt"

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/core/src/test-files/solr/collection1/conf/schema-psuedo-fields.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-psuedo-fields.xml b/solr/core/src/test-files/solr/collection1/conf/schema-psuedo-fields.xml
index 8491ce8..f3b0e30 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema-psuedo-fields.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-psuedo-fields.xml
@@ -59,7 +59,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.StopFilterFactory"
               ignoreCase="true"
               words="stopwords.txt"

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/core/src/test-files/solr/collection1/conf/schema-required-fields.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-required-fields.xml b/solr/core/src/test-files/solr/collection1/conf/schema-required-fields.xml
index c92b901..cf980ca 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema-required-fields.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-required-fields.xml
@@ -273,9 +273,14 @@
 
   <!-- less flexible in matching skus, but less chance of a false match -->
   <fieldType name="syn" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt"/>
     </analyzer>
   </fieldType>
 
@@ -283,9 +288,17 @@
        synonyms "better"
     -->
   <fieldType name="dedup" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.SynonymGraphFilterFactory"
+              synonyms="synonyms.txt" expand="true"/>
+      <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory"
+      <filter class="solr.SynonymGraphFilterFactory"
               synonyms="synonyms.txt" expand="true"/>
       <filter class="solr.PorterStemFilterFactory"/>
       <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/core/src/test-files/solr/collection1/conf/schema-rest.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-rest.xml b/solr/core/src/test-files/solr/collection1/conf/schema-rest.xml
index 7d9bf02..aa4c21d 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema-rest.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-rest.xml
@@ -114,7 +114,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.StopFilterFactory"
               ignoreCase="true"
               words="stopwords.txt"
@@ -137,7 +137,8 @@
     <analyzer type="index">
       <tokenizer class="solr.StandardTokenizerFactory"/>
       <!-- in this example, we will only use synonyms at query time
-      <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
       -->
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.EnglishPossessiveFilterFactory"/>
@@ -149,7 +150,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.StandardTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.EnglishPossessiveFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
@@ -178,7 +179,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.StopFilterFactory"
               ignoreCase="true"
               words="stopwords.txt"
@@ -408,9 +409,14 @@
 
   <!-- less flexible in matching skus, but less chance of a false match -->
   <fieldType name="syn" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt"/>
     </analyzer>
   </fieldType>
 
@@ -420,7 +426,7 @@
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <!-- in this example, we will only use synonyms at query time
-      <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
       -->
       <!--<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
       <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
@@ -431,7 +437,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
       <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0"/>
@@ -445,9 +451,17 @@
        synonyms "better"
     -->
   <fieldType name="dedup" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.SynonymGraphFilterFactory"
+              synonyms="synonyms.txt" expand="true"/>
+      <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory"
+      <filter class="solr.SynonymGraphFilterFactory"
               synonyms="synonyms.txt" expand="true"/>
       <filter class="solr.PorterStemFilterFactory"/>
       <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/core/src/test-files/solr/collection1/conf/schema-single-dynamic-copy-field.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-single-dynamic-copy-field.xml b/solr/core/src/test-files/solr/collection1/conf/schema-single-dynamic-copy-field.xml
index 72dc723..96be1d9 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema-single-dynamic-copy-field.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-single-dynamic-copy-field.xml
@@ -114,7 +114,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.StopFilterFactory"
               ignoreCase="true"
               words="stopwords.txt"
@@ -137,7 +137,8 @@
     <analyzer type="index">
       <tokenizer class="solr.StandardTokenizerFactory"/>
       <!-- in this example, we will only use synonyms at query time
-              <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+              <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+              <filter class="solr.FlattenGraphFilterFactory"/>
               -->
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.EnglishPossessiveFilterFactory"/>
@@ -149,7 +150,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.StandardTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.LowerCaseFilterFactory"/>
       <filter class="solr.EnglishPossessiveFilterFactory"/>
       <filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
@@ -178,7 +179,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.StopFilterFactory"
               ignoreCase="true"
               words="stopwords.txt"
@@ -408,9 +409,14 @@
 
   <!-- less flexible in matching skus, but less chance of a false match -->
   <fieldType name="syn" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt"/>
     </analyzer>
   </fieldType>
 
@@ -420,7 +426,7 @@
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <!-- in this example, we will only use synonyms at query time
-              <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+              <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
               -->
       <!--<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
       <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
@@ -431,7 +437,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
       <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0"/>
@@ -445,9 +451,17 @@
            synonyms "better"
         -->
   <fieldType name="dedup" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.SynonymGraphFilterFactory"
+              synonyms="synonyms.txt" expand="true"/>
+      <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory"
+      <filter class="solr.SynonymGraphFilterFactory"
               synonyms="synonyms.txt" expand="true"/>
       <filter class="solr.PorterStemFilterFactory"/>
       <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/core/src/test-files/solr/collection1/conf/schema-spellchecker.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-spellchecker.xml b/solr/core/src/test-files/solr/collection1/conf/schema-spellchecker.xml
index 11e7dba..42c17da 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema-spellchecker.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-spellchecker.xml
@@ -54,7 +54,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.StandardTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
       <filter class="solr.StandardFilterFactory"/>
       <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/core/src/test-files/solr/collection1/conf/schema-synonym-tokenizer.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-synonym-tokenizer.xml b/solr/core/src/test-files/solr/collection1/conf/schema-synonym-tokenizer.xml
index b443138..e3ff5ec 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema-synonym-tokenizer.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-synonym-tokenizer.xml
@@ -15,7 +15,7 @@
   limitations under the License.
   -->
 
-<!-- Test schema file for SynonymFilterFactory argument "tokenizerFactory" -->
+<!-- Test schema file for SynonymGraphFilterFactory argument "tokenizerFactory" -->
 
 <schema name="synonym-test" version="1.0">
   <fieldType name="int" class="solr.TrieIntField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
@@ -23,8 +23,9 @@
   <fieldType name="text_synonyms" class="solr.TextField" positionIncrementGap="100">
     <analyzer type="index">
       <tokenizer class="solr.StandardTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"
               tokenizerFactory="solr.StandardTokenizerFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.StandardTokenizerFactory"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/core/src/test-files/solr/collection1/conf/schema-trie.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema-trie.xml b/solr/core/src/test-files/solr/collection1/conf/schema-trie.xml
index 375036f..f3d4c10 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema-trie.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema-trie.xml
@@ -162,7 +162,7 @@
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <!-- in this example, we will only use synonyms at query time
-      <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
       -->
       <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
       <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
@@ -175,7 +175,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
       <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
@@ -192,7 +192,7 @@
   <fieldType name="textTight" class="solr.TextField" positionIncrementGap="100">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
       <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
       <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
@@ -204,7 +204,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
       <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
       <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/core/src/test-files/solr/collection1/conf/schema.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema.xml b/solr/core/src/test-files/solr/collection1/conf/schema.xml
index 23ac326..bdda244 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema.xml
@@ -213,7 +213,7 @@
   <fieldType name="lowerpunctfilt" class="solr.TextField">
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" expand="true"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" expand="true"/>
       <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="1" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
@@ -221,7 +221,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" expand="true"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" expand="true"/>
       <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
               catenateNumbers="1" catenateAll="1" splitOnCaseChange="1"/>
       <filter class="solr.LowerCaseFilterFactory"/>
@@ -388,9 +388,14 @@
 
   <!-- less flexible in matching skus, but less chance of a false match -->
   <fieldType name="syn" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="old_synonyms.txt"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="old_synonyms.txt"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="old_synonyms.txt"/>
     </analyzer>
   </fieldType>
 
@@ -398,9 +403,17 @@
        synonyms "better"
     -->
   <fieldType name="dedup" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.SynonymGraphFilterFactory"
+              synonyms="old_synonyms.txt" expand="true"/>
+      <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory"
+      <filter class="solr.SynonymGraphFilterFactory"
               synonyms="old_synonyms.txt" expand="true"/>
       <filter class="solr.PorterStemFilterFactory"/>
       <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/core/src/test-files/solr/collection1/conf/schema11.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema11.xml b/solr/core/src/test-files/solr/collection1/conf/schema11.xml
index db0770b..cccf79a 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema11.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema11.xml
@@ -158,7 +158,7 @@
       <analyzer type="index">
         <tokenizer class="solr.MockTokenizerFactory"/>
         <!-- in this example, we will only use synonyms at query time
-        <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
         -->
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
         <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
@@ -170,7 +170,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.MockTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
         <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
         <filter class="solr.LowerCaseFilterFactory"/>
@@ -186,7 +186,7 @@
     <fieldType name="textTight" class="solr.TextField" positionIncrementGap="100" >
       <analyzer type="index">
         <tokenizer class="solr.MockTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
         <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
@@ -197,7 +197,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.MockTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
         <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
@@ -426,7 +426,7 @@ valued. -->
      <analyzer type="index">
        <tokenizer class="solr.MockTokenizerFactory"/>
        <!-- in this example, we will only use synonyms at query time
-            <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+            <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
        -->
        <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
@@ -438,7 +438,7 @@ valued. -->
      </analyzer>
      <analyzer type="query">
        <tokenizer class="solr.MockTokenizerFactory"/>
-       <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+       <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
        <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
        <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
        <filter class="solr.LowerCaseFilterFactory"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/core/src/test-files/solr/collection1/conf/schema12.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema12.xml b/solr/core/src/test-files/solr/collection1/conf/schema12.xml
index db91377..7ea770b 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema12.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema12.xml
@@ -123,7 +123,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.StopFilterFactory"
               ignoreCase="true"
               words="stopwords.txt"
@@ -154,7 +154,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.StopFilterFactory"
               ignoreCase="true"
               words="stopwords.txt"
@@ -378,9 +378,14 @@
 
   <!-- less flexible in matching skus, but less chance of a false match -->
   <fieldType name="syn" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt"/>
     </analyzer>
   </fieldType>
 
@@ -390,7 +395,7 @@
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <!-- in this example, we will only use synonyms at query time
-      <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
       -->
       <!--<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
       <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
@@ -401,7 +406,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
       <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0"/>
@@ -415,9 +420,17 @@
        synonyms "better"
     -->
   <fieldType name="dedup" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.SynonymGraphFilterFactory"
+              synonyms="synonyms.txt" expand="true"/>
+      <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory"
+      <filter class="solr.SynonymGraphFilterFactory"
               synonyms="synonyms.txt" expand="true"/>
       <filter class="solr.PorterStemFilterFactory"/>
       <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/core/src/test-files/solr/collection1/conf/schema15.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema15.xml b/solr/core/src/test-files/solr/collection1/conf/schema15.xml
index 8fb8d44..57c6bf1 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema15.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema15.xml
@@ -115,7 +115,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.StopFilterFactory"
               ignoreCase="true"
               words="stopwords.txt"
@@ -146,7 +146,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.StopFilterFactory"
               ignoreCase="true"
               words="stopwords.txt"
@@ -376,9 +376,14 @@
 
   <!-- less flexible in matching skus, but less chance of a false match -->
   <fieldType name="syn" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt"/>
     </analyzer>
   </fieldType>
 
@@ -388,7 +393,7 @@
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <!-- in this example, we will only use synonyms at query time
-      <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
       -->
       <!--<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
       <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
@@ -399,7 +404,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
       <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0"/>
@@ -413,9 +418,17 @@
        synonyms "better"
     -->
   <fieldType name="dedup" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.SynonymGraphFilterFactory"
+              synonyms="synonyms.txt" expand="true"/>
+      <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory"
+      <filter class="solr.SynonymGraphFilterFactory"
               synonyms="synonyms.txt" expand="true"/>
       <filter class="solr.PorterStemFilterFactory"/>
       <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/core/src/test-files/solr/collection1/conf/schema_latest.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schema_latest.xml b/solr/core/src/test-files/solr/collection1/conf/schema_latest.xml
index 1163ab5..b19a512 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schema_latest.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schema_latest.xml
@@ -484,14 +484,15 @@
       <tokenizer class="solr.StandardTokenizerFactory"/>
       <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
       <!-- in this example, we will only use synonyms at query time
-      <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
       -->
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.StandardTokenizerFactory"/>
       <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>
   </fieldType>
@@ -505,7 +506,8 @@
     <analyzer type="index">
       <tokenizer class="solr.StandardTokenizerFactory"/>
       <!-- in this example, we will only use synonyms at query time
-      <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
       -->
       <!-- Case insensitive stop word removal.
       -->
@@ -523,7 +525,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.StandardTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.StopFilterFactory"
               ignoreCase="true"
               words="stopwords.txt"
@@ -552,7 +554,7 @@
     <analyzer type="index">
       <tokenizer class="solr.WhitespaceTokenizerFactory"/>
       <!-- in this example, we will only use synonyms at query time
-      <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
       -->
       <!-- Case insensitive stop word removal.
       -->
@@ -569,7 +571,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.StopFilterFactory"
               ignoreCase="true"
               words="stopwords.txt"
@@ -588,7 +590,7 @@
              autoGeneratePhraseQueries="true">
     <analyzer type="index">
       <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
       <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
       <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
@@ -602,7 +604,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
       <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
       <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1"
               catenateNumbers="1" catenateAll="0"/>
@@ -627,7 +629,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.StandardTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
       <filter class="solr.LowerCaseFilterFactory"/>
     </analyzer>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/core/src/test-files/solr/collection1/conf/schemasurround.xml
----------------------------------------------------------------------
diff --git a/solr/core/src/test-files/solr/collection1/conf/schemasurround.xml b/solr/core/src/test-files/solr/collection1/conf/schemasurround.xml
index 3071968..a0c29c9 100644
--- a/solr/core/src/test-files/solr/collection1/conf/schemasurround.xml
+++ b/solr/core/src/test-files/solr/collection1/conf/schemasurround.xml
@@ -116,7 +116,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.StopFilterFactory"
               ignoreCase="true"
               words="stopwords.txt"
@@ -147,7 +147,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.StopFilterFactory"
               ignoreCase="true"
               words="stopwords.txt"
@@ -371,9 +371,14 @@
 
   <!-- less flexible in matching skus, but less chance of a false match -->
   <fieldType name="syn" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt"/>
     </analyzer>
   </fieldType>
 
@@ -383,7 +388,7 @@
     <analyzer type="index">
       <tokenizer class="solr.MockTokenizerFactory"/>
       <!-- in this example, we will only use synonyms at query time
-      <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
       -->
       <!--<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>-->
       <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
@@ -394,7 +399,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
       <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0"/>
@@ -415,7 +420,7 @@
     </analyzer>
     <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+      <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
       <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
               catenateNumbers="0" catenateAll="0" splitOnCaseChange="0"/>
       <filter class="solr.LowerCaseFilterFactory"/>
@@ -427,9 +432,17 @@
        synonyms "better"
     -->
   <fieldType name="dedup" class="solr.TextField">
-    <analyzer>
+    <analyzer type="index">
+      <tokenizer class="solr.MockTokenizerFactory"/>
+      <filter class="solr.SynonymGraphFilterFactory"
+              synonyms="synonyms.txt" expand="true"/>
+      <filter class="solr.PorterStemFilterFactory"/>
+      <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+      <filter class="solr.FlattenGraphFilterFactory"/>
+    </analyzer>
+    <analyzer type="query">
       <tokenizer class="solr.MockTokenizerFactory"/>
-      <filter class="solr.SynonymFilterFactory"
+      <filter class="solr.SynonymGraphFilterFactory"
               synonyms="synonyms.txt" expand="true"/>
       <filter class="solr.PorterStemFilterFactory"/>
       <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/example/example-DIH/solr/db/conf/managed-schema
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/db/conf/managed-schema b/solr/example/example-DIH/solr/db/conf/managed-schema
index 1a1012f..067767e 100644
--- a/solr/example/example-DIH/solr/db/conf/managed-schema
+++ b/solr/example/example-DIH/solr/db/conf/managed-schema
@@ -443,14 +443,15 @@
         <tokenizer class="solr.StandardTokenizerFactory"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
         <!-- in this example, we will only use synonyms at query time
-        <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.FlattenGraphFilterFactory"/>
         -->
         <filter class="solr.LowerCaseFilterFactory"/>
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.StandardTokenizerFactory"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.LowerCaseFilterFactory"/>
       </analyzer>
     </fieldType>
@@ -464,7 +465,8 @@
       <analyzer type="index">
         <tokenizer class="solr.StandardTokenizerFactory"/>
         <!-- in this example, we will only use synonyms at query time
-        <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.FlattenGraphFilterFactory"/>
         -->
         <!-- Case insensitive stop word removal.
         -->
@@ -482,7 +484,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.StopFilterFactory"
                 ignoreCase="true"
                 words="lang/stopwords_en.txt"
@@ -510,7 +512,7 @@
       <analyzer type="index">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
         <!-- in this example, we will only use synonyms at query time
-        <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
         -->
         <!-- Case insensitive stop word removal.
         -->
@@ -526,7 +528,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.StopFilterFactory"
                 ignoreCase="true"
                 words="lang/stopwords_en.txt"
@@ -543,7 +545,7 @@
     <fieldType name="text_en_splitting_tight" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
       <analyzer type="index">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
         <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
@@ -556,7 +558,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
         <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
@@ -580,7 +582,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
         <filter class="solr.LowerCaseFilterFactory"/>
       </analyzer>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/example/example-DIH/solr/mail/conf/managed-schema
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/mail/conf/managed-schema b/solr/example/example-DIH/solr/mail/conf/managed-schema
index 016f105..382dacf 100644
--- a/solr/example/example-DIH/solr/mail/conf/managed-schema
+++ b/solr/example/example-DIH/solr/mail/conf/managed-schema
@@ -362,14 +362,15 @@
         <tokenizer class="solr.StandardTokenizerFactory"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
         <!-- in this example, we will only use synonyms at query time
-        <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.FlattenGraphFilterFactory"/>
         -->
         <filter class="solr.LowerCaseFilterFactory"/>
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.StandardTokenizerFactory"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.LowerCaseFilterFactory"/>
       </analyzer>
     </fieldType>
@@ -383,7 +384,8 @@
       <analyzer type="index">
         <tokenizer class="solr.StandardTokenizerFactory"/>
         <!-- in this example, we will only use synonyms at query time
-        <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.FlattenGraphFilterFactory"/>
         -->
         <!-- Case insensitive stop word removal.
         -->
@@ -401,7 +403,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.StopFilterFactory"
                 ignoreCase="true"
                 words="lang/stopwords_en.txt"
@@ -429,7 +431,8 @@
       <analyzer type="index">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
         <!-- in this example, we will only use synonyms at query time
-        <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.FlattenGraphFilterFactory"/>
         -->
         <!-- Case insensitive stop word removal.
         -->
@@ -445,7 +448,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.StopFilterFactory"
                 ignoreCase="true"
                 words="lang/stopwords_en.txt"
@@ -462,7 +465,7 @@
     <fieldType name="text_en_splitting_tight" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
       <analyzer type="index">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
         <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
@@ -475,7 +478,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
         <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
@@ -499,7 +502,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
         <filter class="solr.LowerCaseFilterFactory"/>
       </analyzer>

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/1a80e4d6/solr/example/example-DIH/solr/rss/conf/managed-schema
----------------------------------------------------------------------
diff --git a/solr/example/example-DIH/solr/rss/conf/managed-schema b/solr/example/example-DIH/solr/rss/conf/managed-schema
index 2064c58..9417902 100644
--- a/solr/example/example-DIH/solr/rss/conf/managed-schema
+++ b/solr/example/example-DIH/solr/rss/conf/managed-schema
@@ -239,7 +239,7 @@
         <charFilter class="solr.HTMLStripCharFilterFactory"/>
         <tokenizer class="solr.StandardTokenizerFactory"/>
         <!-- in this example, we will only use synonyms at query time
-        <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
         -->
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
         <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
@@ -252,7 +252,7 @@
       <analyzer type="query">
         <charFilter class="solr.HTMLStripCharFilterFactory"/>
         <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
         <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
         <filter class="solr.LowerCaseFilterFactory"/>
@@ -394,14 +394,15 @@
         <tokenizer class="solr.StandardTokenizerFactory"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
         <!-- in this example, we will only use synonyms at query time
-        <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.FlattenGraphFilterFactory"/>
         -->
         <filter class="solr.LowerCaseFilterFactory"/>
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.StandardTokenizerFactory"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.LowerCaseFilterFactory"/>
       </analyzer>
     </fieldType>
@@ -415,7 +416,8 @@
       <analyzer type="index">
         <tokenizer class="solr.StandardTokenizerFactory"/>
         <!-- in this example, we will only use synonyms at query time
-        <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.FlattenGraphFilterFactory"/>
         -->
         <!-- Case insensitive stop word removal.
         -->
@@ -433,7 +435,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.StopFilterFactory"
                 ignoreCase="true"
                 words="lang/stopwords_en.txt"
@@ -461,7 +463,7 @@
       <analyzer type="index">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
         <!-- in this example, we will only use synonyms at query time
-        <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
         -->
         <!-- Case insensitive stop word removal.
         -->
@@ -477,7 +479,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.StopFilterFactory"
                 ignoreCase="true"
                 words="lang/stopwords_en.txt"
@@ -494,7 +496,7 @@
     <fieldType name="text_en_splitting_tight" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
       <analyzer type="index">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
         <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
@@ -507,7 +509,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.WhitespaceTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt"/>
         <filter class="solr.WordDelimiterGraphFilterFactory" generateWordParts="0" generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
         <filter class="solr.LowerCaseFilterFactory"/>
@@ -531,7 +533,7 @@
       </analyzer>
       <analyzer type="query">
         <tokenizer class="solr.StandardTokenizerFactory"/>
-        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.SynonymGraphFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true"/>
         <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
         <filter class="solr.LowerCaseFilterFactory"/>
       </analyzer>


[47/52] [abbrv] lucene-solr:jira/solr-9959: switch to advanceExact

Posted by ab...@apache.org.
switch to advanceExact


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/99af8302
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/99af8302
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/99af8302

Branch: refs/heads/jira/solr-9959
Commit: 99af830223a4ef69387586531e56338590659f7c
Parents: ef82183
Author: Mike McCandless <mi...@apache.org>
Authored: Sun Apr 2 16:25:09 2017 -0400
Committer: Mike McCandless <mi...@apache.org>
Committed: Sun Apr 2 16:25:09 2017 -0400

----------------------------------------------------------------------
 .../org/apache/lucene/search/DiversifiedTopDocsCollector.java   | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/99af8302/lucene/misc/src/java/org/apache/lucene/search/DiversifiedTopDocsCollector.java
----------------------------------------------------------------------
diff --git a/lucene/misc/src/java/org/apache/lucene/search/DiversifiedTopDocsCollector.java b/lucene/misc/src/java/org/apache/lucene/search/DiversifiedTopDocsCollector.java
index 6b75622..1a087d4 100644
--- a/lucene/misc/src/java/org/apache/lucene/search/DiversifiedTopDocsCollector.java
+++ b/lucene/misc/src/java/org/apache/lucene/search/DiversifiedTopDocsCollector.java
@@ -124,10 +124,7 @@ public abstract class DiversifiedTopDocsCollector extends
     // a leaf reader value when looking up keys
     int leafDocID = addition.doc - docBase;
     long value;
-    if (keys.docID() < leafDocID) {
-      keys.advance(leafDocID);
-    }
-    if (keys.docID() == leafDocID) {
+    if (keys.advanceExact(leafDocID)) {
       value = keys.longValue();
     } else {
       value = 0;


[25/52] [abbrv] lucene-solr:jira/solr-9959: LUCENE-7755: Join queries should not reference IndexReaders.

Posted by ab...@apache.org.
LUCENE-7755: Join queries should not reference IndexReaders.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/edafcbad
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/edafcbad
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/edafcbad

Branch: refs/heads/jira/solr-9959
Commit: edafcbad14482f3cd2f072fdca0c89600e72885d
Parents: edcdc30
Author: Adrien Grand <jp...@gmail.com>
Authored: Thu Mar 30 15:11:52 2017 +0200
Committer: Adrien Grand <jp...@gmail.com>
Committed: Thu Mar 30 15:11:52 2017 +0200

----------------------------------------------------------------------
 lucene/CHANGES.txt                                  |  7 +++++++
 .../org/apache/lucene/index/IndexReaderContext.java | 10 +++++++++-
 .../lucene/search/join/GlobalOrdinalsQuery.java     | 16 ++++++++++------
 .../search/join/GlobalOrdinalsWithScoreQuery.java   | 16 ++++++++++------
 .../org/apache/lucene/search/join/JoinUtil.java     |  8 +++-----
 5 files changed, 39 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/edafcbad/lucene/CHANGES.txt
----------------------------------------------------------------------
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 92f01a9..da643ff 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -93,6 +93,13 @@ Other
 * LUCENE-7743: Never call new String(String).
   (Daniel Jelinski via Adrien Grand)
 
+======================= Lucene 6.5.1 =======================
+
+Bug Fixes
+
+* LUCENE-7755: Fixed join queries to not reference IndexReaders, as it could
+  cause leaks if they are cached. (Adrien Grand)
+
 ======================= Lucene 6.5.0 =======================
 
 API Changes

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/edafcbad/lucene/core/src/java/org/apache/lucene/index/IndexReaderContext.java
----------------------------------------------------------------------
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexReaderContext.java b/lucene/core/src/java/org/apache/lucene/index/IndexReaderContext.java
index dada3ff..bca7a14 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexReaderContext.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexReaderContext.java
@@ -46,7 +46,15 @@ public abstract class IndexReaderContext {
     this.ordInParent = ordInParent;
     this.isTopLevel = parent==null;
   }
-  
+
+  /** Expert: Return an {@link Object} that uniquely identifies this context.
+   *  The returned object does neither reference this {@link IndexReaderContext}
+   *  nor the wrapped {@link IndexReader}.
+   *  @lucene.experimental */
+  public Object id() {
+    return identity;
+  }
+
   /** Returns the {@link IndexReader}, this context represents. */
   public abstract IndexReader reader();
   

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/edafcbad/lucene/join/src/java/org/apache/lucene/search/join/GlobalOrdinalsQuery.java
----------------------------------------------------------------------
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/GlobalOrdinalsQuery.java b/lucene/join/src/java/org/apache/lucene/search/join/GlobalOrdinalsQuery.java
index 93edcc0..5aaca1a 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/GlobalOrdinalsQuery.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/GlobalOrdinalsQuery.java
@@ -20,7 +20,7 @@ import java.io.IOException;
 import java.util.Set;
 
 import org.apache.lucene.index.DocValues;
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReaderContext;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.MultiDocValues;
 import org.apache.lucene.index.SortedDocValues;
@@ -48,19 +48,23 @@ final class GlobalOrdinalsQuery extends Query {
 
   // just for hashcode and equals:
   private final Query fromQuery;
-  private final IndexReader indexReader;
+  // id of the context rather than the context itself in order not to hold references to index readers
+  private final Object indexReaderContextId;
 
-  GlobalOrdinalsQuery(LongBitSet foundOrds, String joinField, MultiDocValues.OrdinalMap globalOrds, Query toQuery, Query fromQuery, IndexReader indexReader) {
+  GlobalOrdinalsQuery(LongBitSet foundOrds, String joinField, MultiDocValues.OrdinalMap globalOrds, Query toQuery, Query fromQuery, IndexReaderContext context) {
     this.foundOrds = foundOrds;
     this.joinField = joinField;
     this.globalOrds = globalOrds;
     this.toQuery = toQuery;
     this.fromQuery = fromQuery;
-    this.indexReader = indexReader;
+    this.indexReaderContextId = context.id();
   }
 
   @Override
   public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
+    if (searcher.getTopReaderContext().id() != indexReaderContextId) {
+      throw new IllegalStateException("Creating the weight against a different index reader than this query has been built for.");
+    }
     return new W(this, toQuery.createWeight(searcher, false, 1f), boost);
   }
 
@@ -74,7 +78,7 @@ final class GlobalOrdinalsQuery extends Query {
     return fromQuery.equals(other.fromQuery) &&
            joinField.equals(other.joinField) &&
            toQuery.equals(other.toQuery) &&
-           indexReader.equals(other.indexReader);
+           indexReaderContextId.equals(other.indexReaderContextId);
   }
 
   @Override
@@ -83,7 +87,7 @@ final class GlobalOrdinalsQuery extends Query {
     result = 31 * result + joinField.hashCode();
     result = 31 * result + toQuery.hashCode();
     result = 31 * result + fromQuery.hashCode();
-    result = 31 * result + indexReader.hashCode();
+    result = 31 * result + indexReaderContextId.hashCode();
     return result;
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/edafcbad/lucene/join/src/java/org/apache/lucene/search/join/GlobalOrdinalsWithScoreQuery.java
----------------------------------------------------------------------
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/GlobalOrdinalsWithScoreQuery.java b/lucene/join/src/java/org/apache/lucene/search/join/GlobalOrdinalsWithScoreQuery.java
index 0aedf35..5e614ea 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/GlobalOrdinalsWithScoreQuery.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/GlobalOrdinalsWithScoreQuery.java
@@ -20,7 +20,7 @@ import java.io.IOException;
 import java.util.Set;
 
 import org.apache.lucene.index.DocValues;
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReaderContext;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.MultiDocValues;
 import org.apache.lucene.index.SortedDocValues;
@@ -48,9 +48,10 @@ final class GlobalOrdinalsWithScoreQuery extends Query {
   private final Query fromQuery;
   private final int min;
   private final int max;
-  private final IndexReader indexReader;
+  // id of the context rather than the context itself in order not to hold references to index readers
+  private final Object indexReaderContextId;
 
-  GlobalOrdinalsWithScoreQuery(GlobalOrdinalsWithScoreCollector collector, String joinField, MultiDocValues.OrdinalMap globalOrds, Query toQuery, Query fromQuery, int min, int max, IndexReader indexReader) {
+  GlobalOrdinalsWithScoreQuery(GlobalOrdinalsWithScoreCollector collector, String joinField, MultiDocValues.OrdinalMap globalOrds, Query toQuery, Query fromQuery, int min, int max, IndexReaderContext context) {
     this.collector = collector;
     this.joinField = joinField;
     this.globalOrds = globalOrds;
@@ -58,11 +59,14 @@ final class GlobalOrdinalsWithScoreQuery extends Query {
     this.fromQuery = fromQuery;
     this.min = min;
     this.max = max;
-    this.indexReader = indexReader;
+    this.indexReaderContextId = context.id();
   }
 
   @Override
   public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
+    if (searcher.getTopReaderContext().id() != indexReaderContextId) {
+      throw new IllegalStateException("Creating the weight against a different index reader than this query has been built for.");
+    }
     return new W(this, toQuery.createWeight(searcher, false, 1f));
   }
 
@@ -78,7 +82,7 @@ final class GlobalOrdinalsWithScoreQuery extends Query {
            joinField.equals(other.joinField) &&
            fromQuery.equals(other.fromQuery) &&
            toQuery.equals(other.toQuery) &&
-           indexReader.equals(other.indexReader);
+           indexReaderContextId.equals(other.indexReaderContextId);
   }
 
   @Override
@@ -89,7 +93,7 @@ final class GlobalOrdinalsWithScoreQuery extends Query {
     result = 31 * result + fromQuery.hashCode();
     result = 31 * result + min;
     result = 31 * result + max;
-    result = 31 * result + indexReader.hashCode();
+    result = 31 * result + indexReaderContextId.hashCode();
     return result;
   }
 

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/edafcbad/lucene/join/src/java/org/apache/lucene/search/join/JoinUtil.java
----------------------------------------------------------------------
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/JoinUtil.java b/lucene/join/src/java/org/apache/lucene/search/join/JoinUtil.java
index bfc1f9b..537b224 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/JoinUtil.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/JoinUtil.java
@@ -32,7 +32,6 @@ import org.apache.lucene.document.LongPoint;
 import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.MultiDocValues;
@@ -467,8 +466,7 @@ public final class JoinUtil {
                                       MultiDocValues.OrdinalMap ordinalMap,
                                       int min,
                                       int max) throws IOException {
-    IndexReader indexReader = searcher.getIndexReader();
-    int numSegments = indexReader.leaves().size();
+    int numSegments = searcher.getIndexReader().leaves().size();
     final long valueCount;
     if (numSegments == 0) {
       return new MatchNoDocsQuery("JoinUtil.createJoinQuery with no segments");
@@ -509,7 +507,7 @@ public final class JoinUtil {
         if (min <= 0 && max == Integer.MAX_VALUE) {
           GlobalOrdinalsCollector globalOrdinalsCollector = new GlobalOrdinalsCollector(joinField, ordinalMap, valueCount);
           searcher.search(rewrittenFromQuery, globalOrdinalsCollector);
-          return new GlobalOrdinalsQuery(globalOrdinalsCollector.getCollectorOrdinals(), joinField, ordinalMap, rewrittenToQuery, rewrittenFromQuery, indexReader);
+          return new GlobalOrdinalsQuery(globalOrdinalsCollector.getCollectorOrdinals(), joinField, ordinalMap, rewrittenToQuery, rewrittenFromQuery, searcher.getTopReaderContext());
         } else {
           globalOrdinalsWithScoreCollector = new GlobalOrdinalsWithScoreCollector.NoScore(joinField, ordinalMap, valueCount, min, max);
           break;
@@ -518,7 +516,7 @@ public final class JoinUtil {
         throw new IllegalArgumentException(String.format(Locale.ROOT, "Score mode %s isn't supported.", scoreMode));
     }
     searcher.search(rewrittenFromQuery, globalOrdinalsWithScoreCollector);
-    return new GlobalOrdinalsWithScoreQuery(globalOrdinalsWithScoreCollector, joinField, ordinalMap, rewrittenToQuery, rewrittenFromQuery, min, max, indexReader);
+    return new GlobalOrdinalsWithScoreQuery(globalOrdinalsWithScoreCollector, joinField, ordinalMap, rewrittenToQuery, rewrittenFromQuery, min, max, searcher.getTopReaderContext());
   }
 
 }