Posted to dev@jackrabbit.apache.org by "fabriziofortino (via GitHub)" <gi...@apache.org> on 2023/02/24 15:00:56 UTC

[GitHub] [jackrabbit-oak] fabriziofortino opened a new pull request, #860: OAK-10111: support for custom analyzers in elastic

fabriziofortino opened a new pull request, #860:
URL: https://github.com/apache/jackrabbit-oak/pull/860

   Support for custom analyzers in Elasticsearch, with automatic conversion of Lucene configuration. Increased test coverage.
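
   As an illustration, a custom analyzer is configured under the index definition's analyzers/default node. A minimal sketch in Java, adapted from the tests in this PR (idx is the index definition Tree):

       Tree anl = idx.addChild(FulltextIndexConstants.ANALYZERS).addChild(FulltextIndexConstants.ANL_DEFAULT);
       // a custom analyzer is composed of a tokenizer plus optional char/token filters
       anl.addChild(FulltextIndexConstants.ANL_TOKENIZER).setProperty(FulltextIndexConstants.ANL_NAME, "Standard");
       Tree filters = anl.addChild(FulltextIndexConstants.ANL_FILTERS);
       filters.addChild("LowerCase");
       filters.addChild("PorterStem");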


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: dev-unsubscribe@jackrabbit.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [jackrabbit-oak] fabriziofortino merged pull request #860: OAK-10111: support for custom analyzers in elastic

Posted by "fabriziofortino (via GitHub)" <gi...@apache.org>.
fabriziofortino merged PR #860:
URL: https://github.com/apache/jackrabbit-oak/pull/860




[GitHub] [jackrabbit-oak] fabriziofortino commented on a diff in pull request #860: OAK-10111: support for custom analyzers in elastic

Posted by "fabriziofortino (via GitHub)" <gi...@apache.org>.
fabriziofortino commented on code in PR #860:
URL: https://github.com/apache/jackrabbit-oak/pull/860#discussion_r1127637218


##########
oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticCustomAnalyzer.java:
##########
@@ -0,0 +1,254 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.plugins.index.elastic.index;
+
+import co.elastic.clients.elasticsearch._types.analysis.Analyzer;
+import co.elastic.clients.elasticsearch._types.analysis.CharFilterDefinition;
+import co.elastic.clients.elasticsearch._types.analysis.CustomAnalyzer;
+import co.elastic.clients.elasticsearch._types.analysis.TokenFilterDefinition;
+import co.elastic.clients.elasticsearch._types.analysis.TokenizerDefinition;
+import co.elastic.clients.elasticsearch.indices.IndexSettingsAnalysis;
+import co.elastic.clients.json.JsonData;
+import com.google.common.base.CaseFormat;
+import org.apache.commons.io.IOUtils;
+import org.apache.jackrabbit.JcrConstants;
+import org.apache.jackrabbit.oak.api.Blob;
+import org.apache.jackrabbit.oak.api.Tree;
+import org.apache.jackrabbit.oak.api.Type;
+import org.apache.jackrabbit.oak.plugins.index.search.FulltextIndexConstants;
+import org.apache.jackrabbit.oak.plugins.index.search.util.ConfigUtil;
+import org.apache.jackrabbit.oak.plugins.tree.factories.TreeFactory;
+import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry;
+import org.apache.jackrabbit.oak.spi.state.NodeState;
+import org.apache.jackrabbit.oak.spi.state.NodeStateUtils;
+import org.apache.lucene.analysis.charfilter.MappingCharFilterFactory;
+import org.apache.lucene.analysis.en.AbstractWordsFileFilterFactory;
+import org.apache.lucene.analysis.util.AbstractAnalysisFactory;
+import org.apache.lucene.analysis.util.CharFilterFactory;
+import org.apache.lucene.analysis.util.TokenFilterFactory;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.Reader;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.Spliterator;
+import java.util.Spliterators;
+import java.util.function.BiFunction;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.StreamSupport;
+
+/**
+ * Loads custom analysis index settings from a JCR NodeState. It also takes care of the required transformations from
+ * Lucene to Elasticsearch configuration options.
+ */
+public class ElasticCustomAnalyzer {
+
+    private static final Logger LOG = LoggerFactory.getLogger(ElasticCustomAnalyzer.class);
+
+    private static final String ANALYZER_TYPE = "type";
+
+    private static final Set<String> IGNORE_PROP_NAMES = new HashSet<>(Arrays.asList(
+            AbstractAnalysisFactory.LUCENE_MATCH_VERSION_PARAM,
+            FulltextIndexConstants.ANL_CLASS,
+            FulltextIndexConstants.ANL_NAME,
+            JcrConstants.JCR_PRIMARYTYPE)
+    );
+
+    /*
+     * Mappings from Lucene options that are no longer available to their supported Elastic counterparts
+     */
+    private static final Map<Class<? extends AbstractAnalysisFactory>, Map<String, String>> CONFIGURATION_MAPPING;
+
+    static {
+        CONFIGURATION_MAPPING = new LinkedHashMap<>();
+        CONFIGURATION_MAPPING.put(AbstractWordsFileFilterFactory.class, Collections.singletonMap("words", "stopwords"));
+        CONFIGURATION_MAPPING.put(MappingCharFilterFactory.class, Collections.singletonMap("mapping", "mappings"));
+    }

Review Comment:
   `Map.of()` was introduced in Java 9. Is it okay to include it? There is currently no use of it in the repo.
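
   For reference, a minimal sketch of the static initializer above rewritten with `Map.of()` (Java 9+). Note that `Map.of()` returns an unmodifiable map with unspecified iteration order, unlike the LinkedHashMap, so it is only a drop-in replacement if insertion order is not relied upon:

       // hypothetical Java 9+ alternative to the LinkedHashMap static initializer
       private static final Map<Class<? extends AbstractAnalysisFactory>, Map<String, String>> CONFIGURATION_MAPPING =
               Map.of(
                       AbstractWordsFileFilterFactory.class, Map.of("words", "stopwords"),
                       MappingCharFilterFactory.class, Map.of("mapping", "mappings")
               );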





[GitHub] [jackrabbit-oak] fabriziofortino commented on a diff in pull request #860: OAK-10111: support for custom analyzers in elastic

Posted by "fabriziofortino (via GitHub)" <gi...@apache.org>.
fabriziofortino commented on code in PR #860:
URL: https://github.com/apache/jackrabbit-oak/pull/860#discussion_r1127674784


##########
oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticFullTextIndexCommonTest.java:
##########
@@ -67,4 +72,27 @@ protected List<String> getExpectedLogMessage() {
         expectedLogList.add(log2);
         return expectedLogList;
     }
+
+    @Test
+    /*
+     * Analyzers referenced by name are not supported in Lucene; this test can run on Elastic only.
+     */
+    public void fulltextSearchWithBuiltInAnalyzerName() throws Exception {
+        setup(singletonList("foo"), idx -> {
+            Tree anl = idx.addChild(FulltextIndexConstants.ANALYZERS).addChild(FulltextIndexConstants.ANL_DEFAULT);
+            anl.setProperty(FulltextIndexConstants.ANL_NAME, "german");

Review Comment:
   That's not supported in Lucene: analyzers need to be specified as fully qualified class names. Since referencing by name is possible in Elastic only, I think it is right to have the test here only. I don't see the point of adding a Lucene test that merely verifies an exception gets thrown.
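
   For context, the two configuration styles discussed here, taken from the tests in this PR: both backends accept a fully qualified analyzer class, while only Elastic also accepts a built-in analyzer name:

       // supported by both Lucene and Elastic: fully qualified analyzer class
       anl.setProperty(FulltextIndexConstants.ANL_CLASS, "org.apache.lucene.analysis.en.EnglishAnalyzer");
       // Elastic only: built-in analyzer referenced by name
       anl.setProperty(FulltextIndexConstants.ANL_NAME, "german");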





[GitHub] [jackrabbit-oak] reschke commented on a diff in pull request #860: OAK-10111: support for custom analyzers in elastic

Posted by "reschke (via GitHub)" <gi...@apache.org>.
reschke commented on code in PR #860:
URL: https://github.com/apache/jackrabbit-oak/pull/860#discussion_r1127657649


##########
oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticCustomAnalyzer.java:
##########
@@ -0,0 +1,254 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.plugins.index.elastic.index;
+
+import co.elastic.clients.elasticsearch._types.analysis.Analyzer;
+import co.elastic.clients.elasticsearch._types.analysis.CharFilterDefinition;
+import co.elastic.clients.elasticsearch._types.analysis.CustomAnalyzer;
+import co.elastic.clients.elasticsearch._types.analysis.TokenFilterDefinition;
+import co.elastic.clients.elasticsearch._types.analysis.TokenizerDefinition;
+import co.elastic.clients.elasticsearch.indices.IndexSettingsAnalysis;
+import co.elastic.clients.json.JsonData;
+import com.google.common.base.CaseFormat;
+import org.apache.commons.io.IOUtils;
+import org.apache.jackrabbit.JcrConstants;
+import org.apache.jackrabbit.oak.api.Blob;
+import org.apache.jackrabbit.oak.api.Tree;
+import org.apache.jackrabbit.oak.api.Type;
+import org.apache.jackrabbit.oak.plugins.index.search.FulltextIndexConstants;
+import org.apache.jackrabbit.oak.plugins.index.search.util.ConfigUtil;
+import org.apache.jackrabbit.oak.plugins.tree.factories.TreeFactory;
+import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry;
+import org.apache.jackrabbit.oak.spi.state.NodeState;
+import org.apache.jackrabbit.oak.spi.state.NodeStateUtils;
+import org.apache.lucene.analysis.charfilter.MappingCharFilterFactory;
+import org.apache.lucene.analysis.en.AbstractWordsFileFilterFactory;
+import org.apache.lucene.analysis.util.AbstractAnalysisFactory;
+import org.apache.lucene.analysis.util.CharFilterFactory;
+import org.apache.lucene.analysis.util.TokenFilterFactory;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.Reader;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.Spliterator;
+import java.util.Spliterators;
+import java.util.function.BiFunction;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.StreamSupport;
+
+/**
+ * Loads custom analysis index settings from a JCR NodeState. It also takes care of the required transformations from
+ * Lucene to Elasticsearch configuration options.
+ */
+public class ElasticCustomAnalyzer {
+
+    private static final Logger LOG = LoggerFactory.getLogger(ElasticCustomAnalyzer.class);
+
+    private static final String ANALYZER_TYPE = "type";
+
+    private static final Set<String> IGNORE_PROP_NAMES = new HashSet<>(Arrays.asList(
+            AbstractAnalysisFactory.LUCENE_MATCH_VERSION_PARAM,
+            FulltextIndexConstants.ANL_CLASS,
+            FulltextIndexConstants.ANL_NAME,
+            JcrConstants.JCR_PRIMARYTYPE)
+    );
+
+    /*
+     * Mappings from Lucene options that are no longer available to their supported Elastic counterparts
+     */
+    private static final Map<Class<? extends AbstractAnalysisFactory>, Map<String, String>> CONFIGURATION_MAPPING;
+
+    static {
+        CONFIGURATION_MAPPING = new LinkedHashMap<>();
+        CONFIGURATION_MAPPING.put(AbstractWordsFileFilterFactory.class, Collections.singletonMap("words", "stopwords"));
+        CONFIGURATION_MAPPING.put(MappingCharFilterFactory.class, Collections.singletonMap("mapping", "mappings"));
+    }
+
+    @Nullable
+    public static IndexSettingsAnalysis.Builder buildCustomAnalyzers(NodeState state, String analyzerName) {
+        if (state != null) {
+            NodeState defaultAnalyzer = state.getChildNode(FulltextIndexConstants.ANL_DEFAULT);
+            if (defaultAnalyzer.exists()) {
+                IndexSettingsAnalysis.Builder builder = new IndexSettingsAnalysis.Builder();
+                Map<String, Object> analyzer = convertNodeState(defaultAnalyzer);
+                String builtIn = defaultAnalyzer.getString(FulltextIndexConstants.ANL_CLASS);
+                if (builtIn == null) {
+                    builtIn = defaultAnalyzer.getString(FulltextIndexConstants.ANL_NAME);
+                }
+                if (builtIn != null) {
+                    analyzer.put(ANALYZER_TYPE, normalize(builtIn));
+
+                    // content params, usually stop words
+                    for (ChildNodeEntry nodeEntry : defaultAnalyzer.getChildNodeEntries()) {
+                        try {
+                            analyzer.put(normalize(nodeEntry.getName()), loadContent(nodeEntry.getNodeState(), nodeEntry.getName()));
+                        } catch (IOException e) {
+                            throw new IllegalStateException("Unable to load content for node entry " + nodeEntry.getName(), e);
+                        }
+                    }
+
+                    builder.analyzer(analyzerName, new Analyzer(null, JsonData.of(analyzer)));
+                } else { // try to compose the analyzer
+                    builder.tokenizer("custom_tokenizer", tb ->
+                            tb.definition(loadTokenizer(defaultAnalyzer.getChildNode(FulltextIndexConstants.ANL_TOKENIZER))));
+
+                    LinkedHashMap<String, TokenFilterDefinition> tokenFilters = loadFilters(
+                            defaultAnalyzer.getChildNode(FulltextIndexConstants.ANL_FILTERS),
+                            TokenFilterFactory::lookupClass, TokenFilterDefinition::new
+                    );
+                    tokenFilters.forEach((key, value) -> builder.filter(key, fn -> fn.definition(value)));
+
+                    LinkedHashMap<String, CharFilterDefinition> charFilters = loadFilters(
+                            defaultAnalyzer.getChildNode(FulltextIndexConstants.ANL_CHAR_FILTERS),
+                            CharFilterFactory::lookupClass, CharFilterDefinition::new
+                    );
+                    charFilters.forEach((key, value) -> builder.charFilter(key, fn -> fn.definition(value)));
+
+                    builder.analyzer(analyzerName, bf -> bf.custom(CustomAnalyzer.of(cab ->
+                            cab.tokenizer("custom_tokenizer")
+                                    .filter(new ArrayList<>(tokenFilters.keySet()))
+                                    .charFilter(new ArrayList<>(charFilters.keySet()))

Review Comment:
   Yes, unless we need to backport this to an earlier version.





[GitHub] [jackrabbit-oak] nfsantos commented on a diff in pull request #860: OAK-10111: support for custom analyzers in elastic

Posted by "nfsantos (via GitHub)" <gi...@apache.org>.
nfsantos commented on code in PR #860:
URL: https://github.com/apache/jackrabbit-oak/pull/860#discussion_r1126414469


##########
oak-search/src/test/java/org/apache/jackrabbit/oak/plugins/index/FullTextIndexCommonTest.java:
##########
@@ -151,52 +171,253 @@ public void pathTransformationsWithPathRestrictions() throws Exception {
 
         assertEventually(() -> {
             // ALL CHILDREN
-            assertQuery("/jcr:root/test//*[j:c/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a", "/test/c/d"));
-            assertQuery("/jcr:root/test//*[*/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a", "/test/c/d", "/test/e", "/test/f/d", "/test/g/e"));
-            assertQuery("/jcr:root/test//*[d/*/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/c", "/test/f"));
-            assertQuery("/jcr:root/test//*[analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a/j:c","/test/b","/test/c/d/j:c",
+            assertQuery("/jcr:root/test//*[j:c/analyzed_field = 'bar']", XPATH, asList("/test/a", "/test/c/d"));
+            assertQuery("/jcr:root/test//*[*/analyzed_field = 'bar']", XPATH, asList("/test/a", "/test/c/d", "/test/e", "/test/f/d", "/test/g/e"));
+            assertQuery("/jcr:root/test//*[d/*/analyzed_field = 'bar']", XPATH, asList("/test/c", "/test/f"));
+            assertQuery("/jcr:root/test//*[analyzed_field = 'bar']", XPATH, asList("/test/a/j:c","/test/b","/test/c/d/j:c",
                     "/test/e/temp:c", "/test/f/d/temp:c","/test/g/e/temp:c"));
 
             // DIRECT CHILDREN
-            assertQuery("/jcr:root/test/*[j:c/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a"));
-            assertQuery("/jcr:root/test/*[*/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a", "/test/e"));
-            assertQuery("/jcr:root/test/*[d/*/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/c", "/test/f"));
-            assertQuery("/jcr:root/test/*[analyzed_field = 'bar']", XPATH, Arrays.asList("/test/b"));
+            assertQuery("/jcr:root/test/*[j:c/analyzed_field = 'bar']", XPATH, singletonList("/test/a"));
+            assertQuery("/jcr:root/test/*[*/analyzed_field = 'bar']", XPATH, asList("/test/a", "/test/e"));
+            assertQuery("/jcr:root/test/*[d/*/analyzed_field = 'bar']", XPATH, asList("/test/c", "/test/f"));
+            assertQuery("/jcr:root/test/*[analyzed_field = 'bar']", XPATH, singletonList("/test/b"));
 
             // EXACT
-            assertQuery("/jcr:root/test/a[j:c/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a"));
-            assertQuery("/jcr:root/test/a[*/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a"));
-            assertQuery("/jcr:root/test/c[d/*/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/c"));
-            assertQuery("/jcr:root/test/a/j:c[analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a/j:c"));
+            assertQuery("/jcr:root/test/a[j:c/analyzed_field = 'bar']", XPATH, singletonList("/test/a"));
+            assertQuery("/jcr:root/test/a[*/analyzed_field = 'bar']", XPATH, singletonList("/test/a"));
+            assertQuery("/jcr:root/test/c[d/*/analyzed_field = 'bar']", XPATH, singletonList("/test/c"));
+            assertQuery("/jcr:root/test/a/j:c[analyzed_field = 'bar']", XPATH, singletonList("/test/a/j:c"));
 
             // PARENT
-
             assertQuery("select a.[jcr:path] as [jcr:path] from [nt:base] as a \n" +
                     "  inner join [nt:base] as b on ischildnode(b, a)\n" +
                     "  where isdescendantnode(a, '/tmp') \n" +
                     "  and b.[analyzed_field] = 'bar'\n" +
-                    "  and a.[abc] is not null ", SQL2, Arrays.asList("/tmp/a", "/tmp/c/d"));
+                    "  and a.[abc] is not null ", SQL2, asList("/tmp/a", "/tmp/c/d"));
         });
     }
 
+    @Test
+    public void fulltextSearchWithBuiltInAnalyzerClass() throws Exception {
+        setup(singletonList("foo"), idx -> {
+            Tree anl = idx.addChild(FulltextIndexConstants.ANALYZERS).addChild(FulltextIndexConstants.ANL_DEFAULT);
+            anl.setProperty(FulltextIndexConstants.ANL_CLASS, "org.apache.lucene.analysis.en.EnglishAnalyzer");
+        });
+
+        Tree test = root.getTree("/").addChild("test");
+        test.setProperty("foo", "fox jumping");
+        root.commit();
+
+        // the standard English analyzer stems verbs (jumping -> jump)
+        assertEventually(() -> assertQuery("select * from [nt:base] where CONTAINS(*, 'jump')", singletonList("/test")));

Review Comment:
   As before, I suggest adding another property that will not be matched, plus a query that returns no results, for instance searching for 'jum' or 'jumpingjack'.
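
   A sketch of the suggested negative assertions (hypothetical additions to this test):

       // neither a prefix of the stemmed term nor a longer token should match
       assertEventually(() -> {
           assertQuery("select * from [nt:base] where CONTAINS(*, 'jum')", emptyList());
           assertQuery("select * from [nt:base] where CONTAINS(*, 'jumpingjack')", emptyList());
       });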



##########
oak-search/src/test/java/org/apache/jackrabbit/oak/plugins/index/FullTextIndexCommonTest.java:
##########
@@ -40,72 +53,76 @@ protected void assertEventually(Runnable r) {
 
     @Test
     public void defaultAnalyzer() throws Exception {
-        Tree test = setup();
+        setup();
 
+        Tree test = root.getTree("/").addChild("test");
         test.addChild("a").setProperty("analyzed_field", "sun.jpg");
         root.commit();

Review Comment:
   Suggestion: add an additional property to the tree that does not match the query condition, to verify that the code returns only the expected results. Without it, this test would pass even if the query engine returned all properties, regardless of whether they match the query condition.
   
   Same suggestion for the remainder of the tests in this file.
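
   A sketch of the suggested setup, assuming the existing test tree (the second child is a hypothetical non-matching addition):

       test.addChild("a").setProperty("analyzed_field", "sun.jpg");
       // must not be returned by a query matching 'sun.jpg'
       test.addChild("b").setProperty("analyzed_field", "moon.jpg");
       root.commit();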



##########
oak-search/src/test/java/org/apache/jackrabbit/oak/plugins/index/FullTextIndexCommonTest.java:
##########
@@ -151,52 +171,253 @@ public void pathTransformationsWithPathRestrictions() throws Exception {
 
         assertEventually(() -> {
             // ALL CHILDREN
-            assertQuery("/jcr:root/test//*[j:c/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a", "/test/c/d"));
-            assertQuery("/jcr:root/test//*[*/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a", "/test/c/d", "/test/e", "/test/f/d", "/test/g/e"));
-            assertQuery("/jcr:root/test//*[d/*/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/c", "/test/f"));
-            assertQuery("/jcr:root/test//*[analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a/j:c","/test/b","/test/c/d/j:c",
+            assertQuery("/jcr:root/test//*[j:c/analyzed_field = 'bar']", XPATH, asList("/test/a", "/test/c/d"));
+            assertQuery("/jcr:root/test//*[*/analyzed_field = 'bar']", XPATH, asList("/test/a", "/test/c/d", "/test/e", "/test/f/d", "/test/g/e"));
+            assertQuery("/jcr:root/test//*[d/*/analyzed_field = 'bar']", XPATH, asList("/test/c", "/test/f"));
+            assertQuery("/jcr:root/test//*[analyzed_field = 'bar']", XPATH, asList("/test/a/j:c","/test/b","/test/c/d/j:c",
                     "/test/e/temp:c", "/test/f/d/temp:c","/test/g/e/temp:c"));
 
             // DIRECT CHILDREN
-            assertQuery("/jcr:root/test/*[j:c/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a"));
-            assertQuery("/jcr:root/test/*[*/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a", "/test/e"));
-            assertQuery("/jcr:root/test/*[d/*/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/c", "/test/f"));
-            assertQuery("/jcr:root/test/*[analyzed_field = 'bar']", XPATH, Arrays.asList("/test/b"));
+            assertQuery("/jcr:root/test/*[j:c/analyzed_field = 'bar']", XPATH, singletonList("/test/a"));
+            assertQuery("/jcr:root/test/*[*/analyzed_field = 'bar']", XPATH, asList("/test/a", "/test/e"));
+            assertQuery("/jcr:root/test/*[d/*/analyzed_field = 'bar']", XPATH, asList("/test/c", "/test/f"));
+            assertQuery("/jcr:root/test/*[analyzed_field = 'bar']", XPATH, singletonList("/test/b"));
 
             // EXACT
-            assertQuery("/jcr:root/test/a[j:c/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a"));
-            assertQuery("/jcr:root/test/a[*/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a"));
-            assertQuery("/jcr:root/test/c[d/*/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/c"));
-            assertQuery("/jcr:root/test/a/j:c[analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a/j:c"));
+            assertQuery("/jcr:root/test/a[j:c/analyzed_field = 'bar']", XPATH, singletonList("/test/a"));
+            assertQuery("/jcr:root/test/a[*/analyzed_field = 'bar']", XPATH, singletonList("/test/a"));
+            assertQuery("/jcr:root/test/c[d/*/analyzed_field = 'bar']", XPATH, singletonList("/test/c"));
+            assertQuery("/jcr:root/test/a/j:c[analyzed_field = 'bar']", XPATH, singletonList("/test/a/j:c"));
 
             // PARENT
-
             assertQuery("select a.[jcr:path] as [jcr:path] from [nt:base] as a \n" +
                     "  inner join [nt:base] as b on ischildnode(b, a)\n" +
                     "  where isdescendantnode(a, '/tmp') \n" +
                     "  and b.[analyzed_field] = 'bar'\n" +
-                    "  and a.[abc] is not null ", SQL2, Arrays.asList("/tmp/a", "/tmp/c/d"));
+                    "  and a.[abc] is not null ", SQL2, asList("/tmp/a", "/tmp/c/d"));
         });
     }
 
+    @Test
+    public void fulltextSearchWithBuiltInAnalyzerClass() throws Exception {
+        setup(singletonList("foo"), idx -> {
+            Tree anl = idx.addChild(FulltextIndexConstants.ANALYZERS).addChild(FulltextIndexConstants.ANL_DEFAULT);
+            anl.setProperty(FulltextIndexConstants.ANL_CLASS, "org.apache.lucene.analysis.en.EnglishAnalyzer");

Review Comment:
   Is there a test for error handling when the analyzer class does not exist?
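
   A hedged sketch of such a test; the class name is made up, and the exact failure mode (rejected commit vs. a logged indexing error) depends on the implementation:

       try {
           setup(singletonList("foo"), idx -> {
               Tree anl = idx.addChild(FulltextIndexConstants.ANALYZERS).addChild(FulltextIndexConstants.ANL_DEFAULT);
               // hypothetical analyzer class that cannot be resolved
               anl.setProperty(FulltextIndexConstants.ANL_CLASS, "org.example.MissingAnalyzer");
           });
           fail("expected the unknown analyzer class to be rejected");
       } catch (RuntimeException expected) {
           // expected: the analyzer class cannot be instantiated
       }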



##########
oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticFullTextIndexCommonTest.java:
##########
@@ -67,4 +72,27 @@ protected List<String> getExpectedLogMessage() {
         expectedLogList.add(log2);
         return expectedLogList;
     }
+
+    @Test
+    /*
+     * Analyzers referenced by name are not supported in Lucene; this test can run on Elastic only.
+     */
+    public void fulltextSearchWithBuiltInAnalyzerName() throws Exception {
+        setup(singletonList("foo"), idx -> {
+            Tree anl = idx.addChild(FulltextIndexConstants.ANALYZERS).addChild(FulltextIndexConstants.ANL_DEFAULT);
+            anl.setProperty(FulltextIndexConstants.ANL_NAME, "german");

Review Comment:
   How does Lucene behave if we specify an analyzer by name? Should we have a test for that error case?



##########
oak-search/src/test/java/org/apache/jackrabbit/oak/plugins/index/FullTextIndexCommonTest.java:
##########
@@ -151,52 +171,253 @@ public void pathTransformationsWithPathRestrictions() throws Exception {
 
         assertEventually(() -> {
             // ALL CHILDREN
-            assertQuery("/jcr:root/test//*[j:c/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a", "/test/c/d"));
-            assertQuery("/jcr:root/test//*[*/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a", "/test/c/d", "/test/e", "/test/f/d", "/test/g/e"));
-            assertQuery("/jcr:root/test//*[d/*/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/c", "/test/f"));
-            assertQuery("/jcr:root/test//*[analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a/j:c","/test/b","/test/c/d/j:c",
+            assertQuery("/jcr:root/test//*[j:c/analyzed_field = 'bar']", XPATH, asList("/test/a", "/test/c/d"));
+            assertQuery("/jcr:root/test//*[*/analyzed_field = 'bar']", XPATH, asList("/test/a", "/test/c/d", "/test/e", "/test/f/d", "/test/g/e"));
+            assertQuery("/jcr:root/test//*[d/*/analyzed_field = 'bar']", XPATH, asList("/test/c", "/test/f"));
+            assertQuery("/jcr:root/test//*[analyzed_field = 'bar']", XPATH, asList("/test/a/j:c","/test/b","/test/c/d/j:c",
                     "/test/e/temp:c", "/test/f/d/temp:c","/test/g/e/temp:c"));
 
             // DIRECT CHILDREN
-            assertQuery("/jcr:root/test/*[j:c/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a"));
-            assertQuery("/jcr:root/test/*[*/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a", "/test/e"));
-            assertQuery("/jcr:root/test/*[d/*/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/c", "/test/f"));
-            assertQuery("/jcr:root/test/*[analyzed_field = 'bar']", XPATH, Arrays.asList("/test/b"));
+            assertQuery("/jcr:root/test/*[j:c/analyzed_field = 'bar']", XPATH, singletonList("/test/a"));
+            assertQuery("/jcr:root/test/*[*/analyzed_field = 'bar']", XPATH, asList("/test/a", "/test/e"));
+            assertQuery("/jcr:root/test/*[d/*/analyzed_field = 'bar']", XPATH, asList("/test/c", "/test/f"));
+            assertQuery("/jcr:root/test/*[analyzed_field = 'bar']", XPATH, singletonList("/test/b"));
 
             // EXACT
-            assertQuery("/jcr:root/test/a[j:c/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a"));
-            assertQuery("/jcr:root/test/a[*/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a"));
-            assertQuery("/jcr:root/test/c[d/*/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/c"));
-            assertQuery("/jcr:root/test/a/j:c[analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a/j:c"));
+            assertQuery("/jcr:root/test/a[j:c/analyzed_field = 'bar']", XPATH, singletonList("/test/a"));
+            assertQuery("/jcr:root/test/a[*/analyzed_field = 'bar']", XPATH, singletonList("/test/a"));
+            assertQuery("/jcr:root/test/c[d/*/analyzed_field = 'bar']", XPATH, singletonList("/test/c"));
+            assertQuery("/jcr:root/test/a/j:c[analyzed_field = 'bar']", XPATH, singletonList("/test/a/j:c"));
 
             // PARENT
-
             assertQuery("select a.[jcr:path] as [jcr:path] from [nt:base] as a \n" +
                     "  inner join [nt:base] as b on ischildnode(b, a)\n" +
                     "  where isdescendantnode(a, '/tmp') \n" +
                     "  and b.[analyzed_field] = 'bar'\n" +
-                    "  and a.[abc] is not null ", SQL2, Arrays.asList("/tmp/a", "/tmp/c/d"));
+                    "  and a.[abc] is not null ", SQL2, asList("/tmp/a", "/tmp/c/d"));
         });
     }
 
+    @Test
+    public void fulltextSearchWithBuiltInAnalyzerClass() throws Exception {
+        setup(singletonList("foo"), idx -> {
+            Tree anl = idx.addChild(FulltextIndexConstants.ANALYZERS).addChild(FulltextIndexConstants.ANL_DEFAULT);
+            anl.setProperty(FulltextIndexConstants.ANL_CLASS, "org.apache.lucene.analysis.en.EnglishAnalyzer");
+        });
+
+        Tree test = root.getTree("/").addChild("test");
+        test.setProperty("foo", "fox jumping");
+        root.commit();
+
+        // the standard English analyzer stems verbs (jumping -> jump)
+        assertEventually(() -> assertQuery("select * from [nt:base] where CONTAINS(*, 'jump')", singletonList("/test")));
+    }
+
+    @Test
+    public void fulltextSearchWithBuiltInAnalyzerClassAndConfigurationParams() throws Exception {
+        setup(singletonList("foo"), idx -> {
+            Tree anl = idx.addChild(FulltextIndexConstants.ANALYZERS).addChild(FulltextIndexConstants.ANL_DEFAULT);
+            anl.setProperty(FulltextIndexConstants.ANL_CLASS, "org.apache.lucene.analysis.en.EnglishAnalyzer");
+            anl.setProperty("luceneMatchVersion", "LUCENE_47");
+            anl.addChild("stopwords").addChild(JCR_CONTENT).setProperty(JCR_DATA, "dog");
+        });
 
+        Tree test = root.getTree("/").addChild("test");
+        test.setProperty("foo", "dog and cat");
+        root.commit();
+
+        // 'dog' is configured as a stop word, so it must not match
+        assertEventually(() -> {
+            assertQuery("select * from [nt:base] where CONTAINS(*, 'dog')", emptyList());
+            assertQuery("select * from [nt:base] where CONTAINS(*, 'cat')", singletonList("/test"));
+        });
+    }
+
+    @Test
+    public void fulltextSearchWithCustomComposedFilters() throws Exception {
+        setup(singletonList("foo"), idx -> {
+            Tree anl = idx.addChild(FulltextIndexConstants.ANALYZERS).addChild(FulltextIndexConstants.ANL_DEFAULT);
+            anl.addChild(FulltextIndexConstants.ANL_TOKENIZER).setProperty(FulltextIndexConstants.ANL_NAME, "whitespace");
+
+            Tree stopFilter = anl.addChild(FulltextIndexConstants.ANL_FILTERS).addChild("Stop");
+            stopFilter.setProperty("words", "stop1.txt, stop2.txt");
+            stopFilter.addChild("stop1.txt").addChild(JcrConstants.JCR_CONTENT)
+                    .setProperty(JcrConstants.JCR_DATA, "foo");
+            stopFilter.addChild("stop2.txt").addChild(JcrConstants.JCR_CONTENT)
+                    .setProperty(JcrConstants.JCR_DATA, "and");
+        });
+
+        Tree test = root.getTree("/").addChild("test");
+        test.setProperty("foo", "fox jumping");
+        root.commit();
+
+        assertEventually(() -> assertQuery("select * from [nt:base] where CONTAINS(*, 'fox foo jumping')", singletonList("/test")));
+    }
+
+    @Test
+    public void fulltextSearchWithCustomComposedAnalyzer() throws Exception {
+        setup(singletonList("foo"), idx -> {
+            Tree anl = idx.addChild(FulltextIndexConstants.ANALYZERS).addChild(FulltextIndexConstants.ANL_DEFAULT);
+            anl.addChild(FulltextIndexConstants.ANL_TOKENIZER).setProperty(FulltextIndexConstants.ANL_NAME, "Standard");
+
+            Tree charFilters = anl.addChild(FulltextIndexConstants.ANL_CHAR_FILTERS);
+            charFilters.addChild("HTMLStrip");
+            Tree mappingFilter = charFilters.addChild("Mapping");
+            mappingFilter.setProperty("mapping", "mappings.txt");
+            mappingFilter.addChild("mappings.txt").addChild(JcrConstants.JCR_CONTENT)
+                    .setProperty(JcrConstants.JCR_DATA, getHinduArabicMapping());
+
+            Tree filters = anl.addChild(FulltextIndexConstants.ANL_FILTERS);
+            filters.addChild("LowerCase");
+            Tree stopFilter = filters.addChild("Stop");
+            stopFilter.setProperty("words", "stop1.txt, stop2.txt");
+            stopFilter.addChild("stop1.txt").addChild(JcrConstants.JCR_CONTENT)
+                    .setProperty(JcrConstants.JCR_DATA, "my");
+            stopFilter.addChild("stop2.txt").addChild(JcrConstants.JCR_CONTENT)
+                    .setProperty(JcrConstants.JCR_DATA, "is");
+            filters.addChild("PorterStem");
+        });
+
+        Tree test = root.getTree("/").addChild("test");
+        test.setProperty("foo", "My license plate is ٢٥٠١٥");
+        root.commit();
+
+        assertEventually(() -> assertQuery("select * from [nt:base] where CONTAINS(*, '25015')", singletonList("/test")));
+    }
+
+    protected String getHinduArabicMapping() {
+        // Hindu-Arabic numerals conversion from
+        // https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-mapping-charfilter.html
+        return "\"٠\" => \"0\"\n\"١\" => \"1\"\n\"٢\" => \"2\"\n\"٣\" => \"3\"\n\"٤\" => \"4\"\n" +
+                "\"٥\" => \"5\"\n\"٦\" => \"6\"\n\"٧\" => \"7\"\n\"٨\" => \"8\"\n\"٩\" => \"9\"";
+    }
+
+    //OAK-4805
+    @Test
+    public void badIndexDefinitionShouldLetQEWork() throws Exception {
+        setup(singletonList("foo"), idx -> {
+            // This allows the index definition to be committed; otherwise a bad index definition can't be created.
+            idx.setProperty(IndexConstants.ASYNC_PROPERTY_NAME, "async");
+            Tree anl = idx.addChild(FulltextIndexConstants.ANALYZERS).addChild(FulltextIndexConstants.ANL_DEFAULT);
+            anl.addChild(FulltextIndexConstants.ANL_TOKENIZER).setProperty(FulltextIndexConstants.ANL_NAME, "Standard");
+            Tree synFilter = anl.addChild(FulltextIndexConstants.ANL_FILTERS).addChild("Synonym");
+            synFilter.setProperty("synonyms", "syn.txt");
+            // Don't add syn.txt to make analyzer (and hence index def) invalid
+            // synFilter.addChild("syn.txt").addChild(JCR_CONTENT).setProperty(JCR_DATA, "blah, foo, bar");
+        });
+
+        // Use this version of executeQuery as we don't want a result row quoting the exception
+        assertEventually(() -> {
+            try {
+                executeQuery("SELECT * FROM [nt:base] where a='b'", SQL2, QueryEngine.NO_BINDINGS);
+            } catch (ParseException e) {
+                throw new RuntimeException(e);
+            }
+        });
+    }
+
+    @Test
+    public void testSynonyms() throws Exception {
+        setup(singletonList("foo"), idx -> {
+            Tree anl = idx.addChild(FulltextIndexConstants.ANALYZERS).addChild(FulltextIndexConstants.ANL_DEFAULT);
+            anl.addChild(FulltextIndexConstants.ANL_TOKENIZER).setProperty(FulltextIndexConstants.ANL_NAME, "Standard");
+            Tree synFilter = anl.addChild(FulltextIndexConstants.ANL_FILTERS).addChild("Synonym");
+            synFilter.setProperty("synonyms", "syn.txt");
+            synFilter.addChild("syn.txt").addChild(JcrConstants.JCR_CONTENT)
+                    .setProperty(JcrConstants.JCR_DATA, "plane, airplane, aircraft\nflies=>scars");
+        });
+
+        Tree test = root.getTree("/").addChild("test");
+        Tree testNodeChild = test.addChild("node");
+        testNodeChild.setProperty("foo", "an aircraft flies");
+        root.commit();
+
+        assertEventually(() -> {
+            assertQuery("select * from [nt:base] where ISDESCENDANTNODE('/test') and CONTAINS(*, 'plane')", singletonList("/test/node"));
+            assertQuery("select * from [nt:base] where ISDESCENDANTNODE('/test') and CONTAINS(*, 'airplane')", singletonList("/test/node"));
+            assertQuery("select * from [nt:base] where ISDESCENDANTNODE('/test') and CONTAINS(*, 'aircraft')", singletonList("/test/node"));
+            assertQuery("select * from [nt:base] where ISDESCENDANTNODE('/test') and CONTAINS(*, 'scars')", singletonList("/test/node"));
+        });
+    }
+
+    //OAK-4516
+    @Test
+    public void wildcardQueryToLookupUnanalyzedText() throws Exception {
+        Tree index = setup(builder -> {
+            builder.indexRule("nt:base").property("propa").analyzed();
+            builder.indexRule("nt:base").property("propb").nodeScopeIndex();
+        }, idx -> idx.addChild(ANALYZERS).setProperty(FulltextIndexConstants.INDEX_ORIGINAL_TERM, true),
+                "propa", "propb");
+
+        Tree rootTree = root.getTree("/");
+        Tree node1Tree = rootTree.addChild("node1");
+        node1Tree.setProperty("propa", "abcdef");
+        node1Tree.setProperty("propb", "abcdef");
+        Tree node2Tree = rootTree.addChild("node2");
+        node2Tree.setProperty("propa", "abc_def");
+        node2Tree.setProperty("propb", "abc_def");
+        root.commit();
+
+        String fullIndexName = indexOptions.getIndexType() + ":" + index.getName();
+
+        assertEventually(() -> {
+            //normal query still works
+            String query = "select [jcr:path] from [nt:base] where contains('propa', 'abc*')";
+            String explanation = explain(query);
+            assertThat(explanation, containsString(fullIndexName));
+            assertQuery(query, asList("/node1", "/node2"));

Review Comment:
   All the usages of `asList` and `singletonList` can now be replaced by `List.of(...)`.
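
   For example (Java 9+; `List.of` returns an immutable list, which is fine for these read-only assertions):

       // before
       assertQuery(query, asList("/node1", "/node2"));
       // after
       assertQuery(query, List.of("/node1", "/node2"));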



##########
oak-search-elastic/src/test/java/org/apache/jackrabbit/oak/plugins/index/elastic/ElasticFullTextIndexCommonTest.java:
##########
@@ -67,4 +72,27 @@ protected List<String> getExpectedLogMessage() {
         expectedLogList.add(log2);
         return expectedLogList;
     }
+
+    @Test
+    /*
+     * Analyzers referenced by name are not supported in Lucene; this test can run on Elastic only.
+     */
+    public void fulltextSearchWithBuiltInAnalyzerName() throws Exception {
+        setup(singletonList("foo"), idx -> {
+            Tree anl = idx.addChild(FulltextIndexConstants.ANALYZERS).addChild(FulltextIndexConstants.ANL_DEFAULT);
+            anl.setProperty(FulltextIndexConstants.ANL_NAME, "german");

Review Comment:
   I think we should have some more tests for error handling, for instance when the name given here is not valid.
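
   A minimal sketch of such a negative test (the analyzer name is deliberately invalid; how the resulting Elasticsearch error surfaces to the caller is implementation-specific):

       setup(singletonList("foo"), idx -> {
           Tree anl = idx.addChild(FulltextIndexConstants.ANALYZERS).addChild(FulltextIndexConstants.ANL_DEFAULT);
           // hypothetical invalid built-in analyzer name
           anl.setProperty(FulltextIndexConstants.ANL_NAME, "no-such-analyzer");
       });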



##########
oak-search/src/test/java/org/apache/jackrabbit/oak/plugins/index/FullTextIndexCommonTest.java:
##########
@@ -151,52 +171,253 @@ public void pathTransformationsWithPathRestrictions() throws Exception {
 
         assertEventually(() -> {
             // ALL CHILDREN
-            assertQuery("/jcr:root/test//*[j:c/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a", "/test/c/d"));
-            assertQuery("/jcr:root/test//*[*/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a", "/test/c/d", "/test/e", "/test/f/d", "/test/g/e"));
-            assertQuery("/jcr:root/test//*[d/*/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/c", "/test/f"));
-            assertQuery("/jcr:root/test//*[analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a/j:c","/test/b","/test/c/d/j:c",
+            assertQuery("/jcr:root/test//*[j:c/analyzed_field = 'bar']", XPATH, asList("/test/a", "/test/c/d"));
+            assertQuery("/jcr:root/test//*[*/analyzed_field = 'bar']", XPATH, asList("/test/a", "/test/c/d", "/test/e", "/test/f/d", "/test/g/e"));
+            assertQuery("/jcr:root/test//*[d/*/analyzed_field = 'bar']", XPATH, asList("/test/c", "/test/f"));
+            assertQuery("/jcr:root/test//*[analyzed_field = 'bar']", XPATH, asList("/test/a/j:c","/test/b","/test/c/d/j:c",
                     "/test/e/temp:c", "/test/f/d/temp:c","/test/g/e/temp:c"));
 
             // DIRECT CHILDREN
-            assertQuery("/jcr:root/test/*[j:c/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a"));
-            assertQuery("/jcr:root/test/*[*/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a", "/test/e"));
-            assertQuery("/jcr:root/test/*[d/*/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/c", "/test/f"));
-            assertQuery("/jcr:root/test/*[analyzed_field = 'bar']", XPATH, Arrays.asList("/test/b"));
+            assertQuery("/jcr:root/test/*[j:c/analyzed_field = 'bar']", XPATH, singletonList("/test/a"));
+            assertQuery("/jcr:root/test/*[*/analyzed_field = 'bar']", XPATH, asList("/test/a", "/test/e"));
+            assertQuery("/jcr:root/test/*[d/*/analyzed_field = 'bar']", XPATH, asList("/test/c", "/test/f"));
+            assertQuery("/jcr:root/test/*[analyzed_field = 'bar']", XPATH, singletonList("/test/b"));
 
             // EXACT
-            assertQuery("/jcr:root/test/a[j:c/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a"));
-            assertQuery("/jcr:root/test/a[*/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a"));
-            assertQuery("/jcr:root/test/c[d/*/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/c"));
-            assertQuery("/jcr:root/test/a/j:c[analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a/j:c"));
+            assertQuery("/jcr:root/test/a[j:c/analyzed_field = 'bar']", XPATH, singletonList("/test/a"));
+            assertQuery("/jcr:root/test/a[*/analyzed_field = 'bar']", XPATH, singletonList("/test/a"));
+            assertQuery("/jcr:root/test/c[d/*/analyzed_field = 'bar']", XPATH, singletonList("/test/c"));
+            assertQuery("/jcr:root/test/a/j:c[analyzed_field = 'bar']", XPATH, singletonList("/test/a/j:c"));
 
             // PARENT
-
             assertQuery("select a.[jcr:path] as [jcr:path] from [nt:base] as a \n" +
                     "  inner join [nt:base] as b on ischildnode(b, a)\n" +
                     "  where isdescendantnode(a, '/tmp') \n" +
                     "  and b.[analyzed_field] = 'bar'\n" +
-                    "  and a.[abc] is not null ", SQL2, Arrays.asList("/tmp/a", "/tmp/c/d"));
+                    "  and a.[abc] is not null ", SQL2, asList("/tmp/a", "/tmp/c/d"));
         });
     }
 
+    @Test
+    public void fulltextSearchWithBuiltInAnalyzerClass() throws Exception {
+        setup(singletonList("foo"), idx -> {
+            Tree anl = idx.addChild(FulltextIndexConstants.ANALYZERS).addChild(FulltextIndexConstants.ANL_DEFAULT);
+            anl.setProperty(FulltextIndexConstants.ANL_CLASS, "org.apache.lucene.analysis.en.EnglishAnalyzer");
+        });
+
+        Tree test = root.getTree("/").addChild("test");
+        test.setProperty("foo", "fox jumping");
+        root.commit();
+
+        // the standard English analyzer stems verbs (jumping -> jump)
+        assertEventually(() -> assertQuery("select * from [nt:base] where CONTAINS(*, 'jump')", singletonList("/test")));
+    }
+
+    @Test
+    public void fulltextSearchWithBuiltInAnalyzerClassAndConfigurationParams() throws Exception {
+        setup(singletonList("foo"), idx -> {
+            Tree anl = idx.addChild(FulltextIndexConstants.ANALYZERS).addChild(FulltextIndexConstants.ANL_DEFAULT);
+            anl.setProperty(FulltextIndexConstants.ANL_CLASS, "org.apache.lucene.analysis.en.EnglishAnalyzer");
+            anl.setProperty("luceneMatchVersion", "LUCENE_47");
+            anl.addChild("stopwords").addChild(JCR_CONTENT).setProperty(JCR_DATA, "dog");
+        });
 
+        Tree test = root.getTree("/").addChild("test");
+        test.setProperty("foo", "dog and cat");
+        root.commit();
+
+        // 'dog' is configured as a stop word, so it must not match
+        assertEventually(() -> {
+            assertQuery("select * from [nt:base] where CONTAINS(*, 'dog')", emptyList());
+            assertQuery("select * from [nt:base] where CONTAINS(*, 'cat')", singletonList("/test"));
+        });
+    }
+
+    @Test
+    public void fulltextSearchWithCustomComposedFilters() throws Exception {
+        setup(singletonList("foo"), idx -> {
+            Tree anl = idx.addChild(FulltextIndexConstants.ANALYZERS).addChild(FulltextIndexConstants.ANL_DEFAULT);
+            anl.addChild(FulltextIndexConstants.ANL_TOKENIZER).setProperty(FulltextIndexConstants.ANL_NAME, "whitespace");
+
+            Tree stopFilter = anl.addChild(FulltextIndexConstants.ANL_FILTERS).addChild("Stop");
+            stopFilter.setProperty("words", "stop1.txt, stop2.txt");
+            stopFilter.addChild("stop1.txt").addChild(JcrConstants.JCR_CONTENT)
+                    .setProperty(JcrConstants.JCR_DATA, "foo");
+            stopFilter.addChild("stop2.txt").addChild(JcrConstants.JCR_CONTENT)
+                    .setProperty(JcrConstants.JCR_DATA, "and");
+        });
+
+        Tree test = root.getTree("/").addChild("test");
+        test.setProperty("foo", "fox jumping");
+        root.commit();
+
+        assertEventually(() -> assertQuery("select * from [nt:base] where CONTAINS(*, 'fox foo jumping')", singletonList("/test")));
+    }
+
+    @Test
+    public void fulltextSearchWithCustomComposedAnalyzer() throws Exception {
+        setup(singletonList("foo"), idx -> {
+            Tree anl = idx.addChild(FulltextIndexConstants.ANALYZERS).addChild(FulltextIndexConstants.ANL_DEFAULT);
+            anl.addChild(FulltextIndexConstants.ANL_TOKENIZER).setProperty(FulltextIndexConstants.ANL_NAME, "Standard");
+
+            Tree charFilters = anl.addChild(FulltextIndexConstants.ANL_CHAR_FILTERS);
+            charFilters.addChild("HTMLStrip");
+            Tree mappingFilter = charFilters.addChild("Mapping");
+            mappingFilter.setProperty("mapping", "mappings.txt");
+            mappingFilter.addChild("mappings.txt").addChild(JcrConstants.JCR_CONTENT)
+                    .setProperty(JcrConstants.JCR_DATA, getHinduArabicMapping());
+
+            Tree filters = anl.addChild(FulltextIndexConstants.ANL_FILTERS);
+            filters.addChild("LowerCase");
+            Tree stopFilter = filters.addChild("Stop");
+            stopFilter.setProperty("words", "stop1.txt, stop2.txt");
+            stopFilter.addChild("stop1.txt").addChild(JcrConstants.JCR_CONTENT)
+                    .setProperty(JcrConstants.JCR_DATA, "my");
+            stopFilter.addChild("stop2.txt").addChild(JcrConstants.JCR_CONTENT)
+                    .setProperty(JcrConstants.JCR_DATA, "is");
+            filters.addChild("PorterStem");
+        });
+
+        Tree test = root.getTree("/").addChild("test");
+        test.setProperty("foo", "My license plate is ٢٥٠١٥");
+        root.commit();
+
+        assertEventually(() -> assertQuery("select * from [nt:base] where CONTAINS(*, '25015')", singletonList("/test")));
+    }
+
+    protected String getHinduArabicMapping() {
+        // Hindu-Arabic numerals conversion from
+        // https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-mapping-charfilter.html
+        return "\"٠\" => \"0\"\n\"١\" => \"1\"\n\"٢\" => \"2\"\n\"٣\" => \"3\"\n\"٤\" => \"4\"\n" +
+                "\"٥\" => \"5\"\n\"٦\" => \"6\"\n\"٧\" => \"7\"\n\"٨\" => \"8\"\n\"٩\" => \"9\"";
+    }
+
+    //OAK-4805
+    @Test
+    public void badIndexDefinitionShouldLetQEWork() throws Exception {
+        setup(singletonList("foo"), idx -> {
+            // This allows the index definition to be committed; otherwise a bad index definition can't be created.
+            idx.setProperty(IndexConstants.ASYNC_PROPERTY_NAME, "async");
+            Tree anl = idx.addChild(FulltextIndexConstants.ANALYZERS).addChild(FulltextIndexConstants.ANL_DEFAULT);
+            anl.addChild(FulltextIndexConstants.ANL_TOKENIZER).setProperty(FulltextIndexConstants.ANL_NAME, "Standard");
+            Tree synFilter = anl.addChild(FulltextIndexConstants.ANL_FILTERS).addChild("Synonym");
+            synFilter.setProperty("synonyms", "syn.txt");
+            // Don't add syn.txt to make analyzer (and hence index def) invalid
+            // synFilter.addChild("syn.txt").addChild(JCR_CONTENT).setProperty(JCR_DATA, "blah, foo, bar");
+        });
+
+        // Use this version of executeQuery as we don't want a result row quoting the exception
+        assertEventually(() -> {
+            try {
+                executeQuery("SELECT * FROM [nt:base] where a='b'", SQL2, QueryEngine.NO_BINDINGS);
+            } catch (ParseException e) {
+                throw new RuntimeException(e);
+            }
+        });
+    }
+
+    @Test
+    public void testSynonyms() throws Exception {
+        setup(singletonList("foo"), idx -> {
+            Tree anl = idx.addChild(FulltextIndexConstants.ANALYZERS).addChild(FulltextIndexConstants.ANL_DEFAULT);
+            anl.addChild(FulltextIndexConstants.ANL_TOKENIZER).setProperty(FulltextIndexConstants.ANL_NAME, "Standard");
+            Tree synFilter = anl.addChild(FulltextIndexConstants.ANL_FILTERS).addChild("Synonym");
+            synFilter.setProperty("synonyms", "syn.txt");
+            synFilter.addChild("syn.txt").addChild(JcrConstants.JCR_CONTENT)
+                    .setProperty(JcrConstants.JCR_DATA, "plane, airplane, aircraft\nflies=>scars");
+        });
+
+        Tree test = root.getTree("/").addChild("test");
+        Tree testNodeChild = test.addChild("node");
+        testNodeChild.setProperty("foo", "an aircraft flies");
+        root.commit();
+
+        assertEventually(() -> {
+            assertQuery("select * from [nt:base] where ISDESCENDANTNODE('/test') and CONTAINS(*, 'plane')", singletonList("/test/node"));
+            assertQuery("select * from [nt:base] where ISDESCENDANTNODE('/test') and CONTAINS(*, 'airplane')", singletonList("/test/node"));
+            assertQuery("select * from [nt:base] where ISDESCENDANTNODE('/test') and CONTAINS(*, 'aircraft')", singletonList("/test/node"));
+            assertQuery("select * from [nt:base] where ISDESCENDANTNODE('/test') and CONTAINS(*, 'scars')", singletonList("/test/node"));
+        });
+    }
+
+    //OAK-4516
+    @Test
+    public void wildcardQueryToLookupUnanalyzedText() throws Exception {
+        Tree index = setup(builder -> {
+            builder.indexRule("nt:base").property("propa").analyzed();
+            builder.indexRule("nt:base").property("propb").nodeScopeIndex();
+        }, idx -> idx.addChild(ANALYZERS).setProperty(FulltextIndexConstants.INDEX_ORIGINAL_TERM, true),
+                "propa", "propb");

Review Comment:
   Not aligned. I suggest using IntelliJ's autoformat.





[GitHub] [jackrabbit-oak] fabriziofortino commented on a diff in pull request #860: OAK-10111: support for custom analyzers in elastic

Posted by "fabriziofortino (via GitHub)" <gi...@apache.org>.
fabriziofortino commented on code in PR #860:
URL: https://github.com/apache/jackrabbit-oak/pull/860#discussion_r1127687076


##########
oak-search/src/test/java/org/apache/jackrabbit/oak/plugins/index/FullTextIndexCommonTest.java:
##########
@@ -151,52 +171,253 @@ public void pathTransformationsWithPathRestrictions() throws Exception {
 
         assertEventually(() -> {
             // ALL CHILDREN
-            assertQuery("/jcr:root/test//*[j:c/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a", "/test/c/d"));
-            assertQuery("/jcr:root/test//*[*/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a", "/test/c/d", "/test/e", "/test/f/d", "/test/g/e"));
-            assertQuery("/jcr:root/test//*[d/*/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/c", "/test/f"));
-            assertQuery("/jcr:root/test//*[analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a/j:c","/test/b","/test/c/d/j:c",
+            assertQuery("/jcr:root/test//*[j:c/analyzed_field = 'bar']", XPATH, asList("/test/a", "/test/c/d"));
+            assertQuery("/jcr:root/test//*[*/analyzed_field = 'bar']", XPATH, asList("/test/a", "/test/c/d", "/test/e", "/test/f/d", "/test/g/e"));
+            assertQuery("/jcr:root/test//*[d/*/analyzed_field = 'bar']", XPATH, asList("/test/c", "/test/f"));
+            assertQuery("/jcr:root/test//*[analyzed_field = 'bar']", XPATH, asList("/test/a/j:c","/test/b","/test/c/d/j:c",
                     "/test/e/temp:c", "/test/f/d/temp:c","/test/g/e/temp:c"));
 
             // DIRECT CHILDREN
-            assertQuery("/jcr:root/test/*[j:c/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a"));
-            assertQuery("/jcr:root/test/*[*/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a", "/test/e"));
-            assertQuery("/jcr:root/test/*[d/*/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/c", "/test/f"));
-            assertQuery("/jcr:root/test/*[analyzed_field = 'bar']", XPATH, Arrays.asList("/test/b"));
+            assertQuery("/jcr:root/test/*[j:c/analyzed_field = 'bar']", XPATH, singletonList("/test/a"));
+            assertQuery("/jcr:root/test/*[*/analyzed_field = 'bar']", XPATH, asList("/test/a", "/test/e"));
+            assertQuery("/jcr:root/test/*[d/*/analyzed_field = 'bar']", XPATH, asList("/test/c", "/test/f"));
+            assertQuery("/jcr:root/test/*[analyzed_field = 'bar']", XPATH, singletonList("/test/b"));
 
             // EXACT
-            assertQuery("/jcr:root/test/a[j:c/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a"));
-            assertQuery("/jcr:root/test/a[*/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a"));
-            assertQuery("/jcr:root/test/c[d/*/analyzed_field = 'bar']", XPATH, Arrays.asList("/test/c"));
-            assertQuery("/jcr:root/test/a/j:c[analyzed_field = 'bar']", XPATH, Arrays.asList("/test/a/j:c"));
+            assertQuery("/jcr:root/test/a[j:c/analyzed_field = 'bar']", XPATH, singletonList("/test/a"));
+            assertQuery("/jcr:root/test/a[*/analyzed_field = 'bar']", XPATH, singletonList("/test/a"));
+            assertQuery("/jcr:root/test/c[d/*/analyzed_field = 'bar']", XPATH, singletonList("/test/c"));
+            assertQuery("/jcr:root/test/a/j:c[analyzed_field = 'bar']", XPATH, singletonList("/test/a/j:c"));
 
             // PARENT
-
             assertQuery("select a.[jcr:path] as [jcr:path] from [nt:base] as a \n" +
                     "  inner join [nt:base] as b on ischildnode(b, a)\n" +
                     "  where isdescendantnode(a, '/tmp') \n" +
                     "  and b.[analyzed_field] = 'bar'\n" +
-                    "  and a.[abc] is not null ", SQL2, Arrays.asList("/tmp/a", "/tmp/c/d"));
+                    "  and a.[abc] is not null ", SQL2, asList("/tmp/a", "/tmp/c/d"));
         });
     }
 
+    @Test
+    public void fulltextSearchWithBuiltInAnalyzerClass() throws Exception {
+        setup(singletonList("foo"), idx -> {
+            Tree anl = idx.addChild(FulltextIndexConstants.ANALYZERS).addChild(FulltextIndexConstants.ANL_DEFAULT);
+            anl.setProperty(FulltextIndexConstants.ANL_CLASS, "org.apache.lucene.analysis.en.EnglishAnalyzer");
+        });
+
+        Tree test = root.getTree("/").addChild("test");
+        test.setProperty("foo", "fox jumping");
+        root.commit();
+
+        // the standard English analyzer stems verbs (jumping -> jump)
+        assertEventually(() -> assertQuery("select * from [nt:base] where CONTAINS(*, 'jump')", singletonList("/test")));
+    }
+
+    @Test
+    public void fulltextSearchWithBuiltInAnalyzerClassAndConfigurationParams() throws Exception {
+        setup(singletonList("foo"), idx -> {
+            Tree anl = idx.addChild(FulltextIndexConstants.ANALYZERS).addChild(FulltextIndexConstants.ANL_DEFAULT);
+            anl.setProperty(FulltextIndexConstants.ANL_CLASS, "org.apache.lucene.analysis.en.EnglishAnalyzer");
+            anl.setProperty("luceneMatchVersion", "LUCENE_47");
+            anl.addChild("stopwords").addChild(JCR_CONTENT).setProperty(JCR_DATA, "dog");
+        });
 
+        Tree test = root.getTree("/").addChild("test");
+        test.setProperty("foo", "dog and cat");
+        root.commit();
+
+        // 'dog' is configured as a stopword, so it is excluded from the index; 'cat' still matches
+        assertEventually(() -> {
+            assertQuery("select * from [nt:base] where CONTAINS(*, 'dog')", emptyList());
+            assertQuery("select * from [nt:base] where CONTAINS(*, 'cat')", singletonList("/test"));
+        });
+    }
+
+    @Test
+    public void fulltextSearchWithCustomComposedFilters() throws Exception {
+        setup(singletonList("foo"), idx -> {
+            Tree anl = idx.addChild(FulltextIndexConstants.ANALYZERS).addChild(FulltextIndexConstants.ANL_DEFAULT);
+            anl.addChild(FulltextIndexConstants.ANL_TOKENIZER).setProperty(FulltextIndexConstants.ANL_NAME, "whitespace");
+
+            Tree stopFilter = anl.addChild(FulltextIndexConstants.ANL_FILTERS).addChild("Stop");
+            stopFilter.setProperty("words", "stop1.txt, stop2.txt");
+            stopFilter.addChild("stop1.txt").addChild(JcrConstants.JCR_CONTENT)
+                    .setProperty(JcrConstants.JCR_DATA, "foo");
+            stopFilter.addChild("stop2.txt").addChild(JcrConstants.JCR_CONTENT)
+                    .setProperty(JcrConstants.JCR_DATA, "and");
+        });
+
+        Tree test = root.getTree("/").addChild("test");
+        test.setProperty("foo", "fox jumping");
+        root.commit();
+
+        assertEventually(() -> assertQuery("select * from [nt:base] where CONTAINS(*, 'fox foo jumping')", singletonList("/test")));
+    }
+
+    @Test
+    public void fulltextSearchWithCustomComposedAnalyzer() throws Exception {
+        setup(singletonList("foo"), idx -> {
+            Tree anl = idx.addChild(FulltextIndexConstants.ANALYZERS).addChild(FulltextIndexConstants.ANL_DEFAULT);
+            anl.addChild(FulltextIndexConstants.ANL_TOKENIZER).setProperty(FulltextIndexConstants.ANL_NAME, "Standard");
+
+            Tree charFilters = anl.addChild(FulltextIndexConstants.ANL_CHAR_FILTERS);
+            charFilters.addChild("HTMLStrip");
+            Tree mappingFilter = charFilters.addChild("Mapping");
+            mappingFilter.setProperty("mapping", "mappings.txt");
+            mappingFilter.addChild("mappings.txt").addChild(JcrConstants.JCR_CONTENT)
+                    .setProperty(JcrConstants.JCR_DATA, getHinduArabicMapping());
+
+            Tree filters = anl.addChild(FulltextIndexConstants.ANL_FILTERS);
+            filters.addChild("LowerCase");
+            Tree stopFilter = filters.addChild("Stop");
+            stopFilter.setProperty("words", "stop1.txt, stop2.txt");
+            stopFilter.addChild("stop1.txt").addChild(JcrConstants.JCR_CONTENT)
+                    .setProperty(JcrConstants.JCR_DATA, "my");
+            stopFilter.addChild("stop2.txt").addChild(JcrConstants.JCR_CONTENT)
+                    .setProperty(JcrConstants.JCR_DATA, "is");
+            filters.addChild("PorterStem");
+        });
+
+        Tree test = root.getTree("/").addChild("test");
+        test.setProperty("foo", "My license plate is ٢٥٠١٥");
+        root.commit();
+
+        assertEventually(() -> assertQuery("select * from [nt:base] where CONTAINS(*, '25015')", singletonList("/test")));
+    }
+
+    protected String getHinduArabicMapping() {
+        // Hindu-Arabic numerals conversion from
+        // https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis-mapping-charfilter.html
+        return "\"٠\" => \"0\"\n\"١\" => \"1\"\n\"٢\" => \"2\"\n\"٣\" => \"3\"\n\"٤\" => \"4\"\n" +
+                "\"٥\" => \"5\"\n\"٦\" => \"6\"\n\"٧\" => \"7\"\n\"٨\" => \"8\"\n\"٩\" => \"9\"";
+    }
+
+    //OAK-4805
+    @Test
+    public void badIndexDefinitionShouldLetQEWork() throws Exception {
+        setup(singletonList("foo"), idx -> {
+            //This would allow index def to get committed. Else bad index def can't be created.
+            idx.setProperty(IndexConstants.ASYNC_PROPERTY_NAME, "async");
+            Tree anl = idx.addChild(FulltextIndexConstants.ANALYZERS).addChild(FulltextIndexConstants.ANL_DEFAULT);
+            anl.addChild(FulltextIndexConstants.ANL_TOKENIZER).setProperty(FulltextIndexConstants.ANL_NAME, "Standard");
+            Tree synFilter = anl.addChild(FulltextIndexConstants.ANL_FILTERS).addChild("Synonym");
+            synFilter.setProperty("synonyms", "syn.txt");
+            // Don't add syn.txt to make analyzer (and hence index def) invalid
+            // synFilter.addChild("syn.txt").addChild(JCR_CONTENT).setProperty(JCR_DATA, "blah, foo, bar");
+        });
+
+        //Using this version of executeQuery as we don't want a result row quoting the exception
+        assertEventually(() -> {
+            try {
+                executeQuery("SELECT * FROM [nt:base] where a='b'", SQL2, QueryEngine.NO_BINDINGS);
+            } catch (ParseException e) {
+                throw new RuntimeException(e);
+            }
+        });
+    }
+
+    @Test
+    public void testSynonyms() throws Exception {
+        setup(singletonList("foo"), idx -> {
+            Tree anl = idx.addChild(FulltextIndexConstants.ANALYZERS).addChild(FulltextIndexConstants.ANL_DEFAULT);
+            anl.addChild(FulltextIndexConstants.ANL_TOKENIZER).setProperty(FulltextIndexConstants.ANL_NAME, "Standard");
+            Tree synFilter = anl.addChild(FulltextIndexConstants.ANL_FILTERS).addChild("Synonym");
+            synFilter.setProperty("synonyms", "syn.txt");
+            synFilter.addChild("syn.txt").addChild(JcrConstants.JCR_CONTENT)
+                    .setProperty(JcrConstants.JCR_DATA, "plane, airplane, aircraft\nflies=>scars");
+        });
+
+        Tree test = root.getTree("/").addChild("test");
+        Tree testNodeChild = test.addChild("node");
+        testNodeChild.setProperty("foo", "an aircraft flies");
+        root.commit();
+
+        assertEventually(() -> {
+            assertQuery("select * from [nt:base] where ISDESCENDANTNODE('/test') and CONTAINS(*, 'plane')", singletonList("/test/node"));
+            assertQuery("select * from [nt:base] where ISDESCENDANTNODE('/test') and CONTAINS(*, 'airplane')", singletonList("/test/node"));
+            assertQuery("select * from [nt:base] where ISDESCENDANTNODE('/test') and CONTAINS(*, 'aircraft')", singletonList("/test/node"));
+            assertQuery("select * from [nt:base] where ISDESCENDANTNODE('/test') and CONTAINS(*, 'scars')", singletonList("/test/node"));
+        });
+    }
+
+    //OAK-4516
+    @Test
+    public void wildcardQueryToLookupUnanalyzedText() throws Exception {
+        Tree index = setup(builder -> {
+            builder.indexRule("nt:base").property("propa").analyzed();
+            builder.indexRule("nt:base").property("propb").nodeScopeIndex();
+        }, idx -> idx.addChild(ANALYZERS).setProperty(FulltextIndexConstants.INDEX_ORIGINAL_TERM, true),
+                "propa", "propb");
+
+        Tree rootTree = root.getTree("/");
+        Tree node1Tree = rootTree.addChild("node1");
+        node1Tree.setProperty("propa", "abcdef");
+        node1Tree.setProperty("propb", "abcdef");
+        Tree node2Tree = rootTree.addChild("node2");
+        node2Tree.setProperty("propa", "abc_def");
+        node2Tree.setProperty("propb", "abc_def");
+        root.commit();
+
+        String fullIndexName = indexOptions.getIndexType() + ":" + index.getName();
+
+        assertEventually(() -> {
+            //normal query still works
+            String query = "select [jcr:path] from [nt:base] where contains('propa', 'abc*')";
+            String explanation = explain(query);
+            assertThat(explanation, containsString(fullIndexName));
+            assertQuery(query, asList("/node1", "/node2"));

Review Comment:
   I did not use it for the same reason explained above.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: dev-unsubscribe@jackrabbit.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [jackrabbit-oak] nfsantos commented on a diff in pull request #860: OAK-10111: support for custom analyzers in elastic

Posted by "nfsantos (via GitHub)" <gi...@apache.org>.
nfsantos commented on code in PR #860:
URL: https://github.com/apache/jackrabbit-oak/pull/860#discussion_r1124641325


##########
oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticCustomAnalyzer.java:
##########
@@ -0,0 +1,254 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.plugins.index.elastic.index;
+
+import co.elastic.clients.elasticsearch._types.analysis.Analyzer;
+import co.elastic.clients.elasticsearch._types.analysis.CharFilterDefinition;
+import co.elastic.clients.elasticsearch._types.analysis.CustomAnalyzer;
+import co.elastic.clients.elasticsearch._types.analysis.TokenFilterDefinition;
+import co.elastic.clients.elasticsearch._types.analysis.TokenizerDefinition;
+import co.elastic.clients.elasticsearch.indices.IndexSettingsAnalysis;
+import co.elastic.clients.json.JsonData;
+import com.google.common.base.CaseFormat;
+import org.apache.commons.io.IOUtils;
+import org.apache.jackrabbit.JcrConstants;
+import org.apache.jackrabbit.oak.api.Blob;
+import org.apache.jackrabbit.oak.api.Tree;
+import org.apache.jackrabbit.oak.api.Type;
+import org.apache.jackrabbit.oak.plugins.index.search.FulltextIndexConstants;
+import org.apache.jackrabbit.oak.plugins.index.search.util.ConfigUtil;
+import org.apache.jackrabbit.oak.plugins.tree.factories.TreeFactory;
+import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry;
+import org.apache.jackrabbit.oak.spi.state.NodeState;
+import org.apache.jackrabbit.oak.spi.state.NodeStateUtils;
+import org.apache.lucene.analysis.charfilter.MappingCharFilterFactory;
+import org.apache.lucene.analysis.en.AbstractWordsFileFilterFactory;
+import org.apache.lucene.analysis.util.AbstractAnalysisFactory;
+import org.apache.lucene.analysis.util.CharFilterFactory;
+import org.apache.lucene.analysis.util.TokenFilterFactory;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.Reader;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.Spliterator;
+import java.util.Spliterators;
+import java.util.function.BiFunction;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.StreamSupport;
+
+/**
+ * Loads custom analysis index settings from a JCR NodeState. It also takes care of required transformations from lucene

Review Comment:
   ```suggestion
    * Loads custom analysis index settings from a JCR NodeState. It also takes care of required transformations from Lucene
   ```



##########
oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticCustomAnalyzer.java:
##########
@@ -0,0 +1,254 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.plugins.index.elastic.index;
+
+import co.elastic.clients.elasticsearch._types.analysis.Analyzer;
+import co.elastic.clients.elasticsearch._types.analysis.CharFilterDefinition;
+import co.elastic.clients.elasticsearch._types.analysis.CustomAnalyzer;
+import co.elastic.clients.elasticsearch._types.analysis.TokenFilterDefinition;
+import co.elastic.clients.elasticsearch._types.analysis.TokenizerDefinition;
+import co.elastic.clients.elasticsearch.indices.IndexSettingsAnalysis;
+import co.elastic.clients.json.JsonData;
+import com.google.common.base.CaseFormat;
+import org.apache.commons.io.IOUtils;
+import org.apache.jackrabbit.JcrConstants;
+import org.apache.jackrabbit.oak.api.Blob;
+import org.apache.jackrabbit.oak.api.Tree;
+import org.apache.jackrabbit.oak.api.Type;
+import org.apache.jackrabbit.oak.plugins.index.search.FulltextIndexConstants;
+import org.apache.jackrabbit.oak.plugins.index.search.util.ConfigUtil;
+import org.apache.jackrabbit.oak.plugins.tree.factories.TreeFactory;
+import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry;
+import org.apache.jackrabbit.oak.spi.state.NodeState;
+import org.apache.jackrabbit.oak.spi.state.NodeStateUtils;
+import org.apache.lucene.analysis.charfilter.MappingCharFilterFactory;
+import org.apache.lucene.analysis.en.AbstractWordsFileFilterFactory;
+import org.apache.lucene.analysis.util.AbstractAnalysisFactory;
+import org.apache.lucene.analysis.util.CharFilterFactory;
+import org.apache.lucene.analysis.util.TokenFilterFactory;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.Reader;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.Spliterator;
+import java.util.Spliterators;
+import java.util.function.BiFunction;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.StreamSupport;
+
+/**
+ * Loads custom analysis index settings from a JCR NodeState. It also takes care of required transformations from lucene
+ * to elasticsearch configuration options.
+ */
+public class ElasticCustomAnalyzer {
+
+    private static final Logger LOG = LoggerFactory.getLogger(ElasticCustomAnalyzer.class);
+
+    private static final String ANALYZER_TYPE = "type";
+
+    private static final Set<String> IGNORE_PROP_NAMES = new HashSet<>(Arrays.asList(
+            AbstractAnalysisFactory.LUCENE_MATCH_VERSION_PARAM,
+            FulltextIndexConstants.ANL_CLASS,
+            FulltextIndexConstants.ANL_NAME,
+            JcrConstants.JCR_PRIMARYTYPE)
+    );
+
+    /*
+     * Mappings for lucene options not available anymore to supported elastic counterparts
+     */
+    private static final Map<Class<? extends AbstractAnalysisFactory>, Map<String, String>> CONFIGURATION_MAPPING;
+
+    static {
+        CONFIGURATION_MAPPING = new LinkedHashMap<>();
+        CONFIGURATION_MAPPING.put(AbstractWordsFileFilterFactory.class, Collections.singletonMap("words", "stopwords"));
+        CONFIGURATION_MAPPING.put(MappingCharFilterFactory.class, Collections.singletonMap("mapping", "mappings"));
+    }

Review Comment:
   Similar here using `Map.of()`
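
   A sketch of how that could look, assuming the module targets Java 9+ (where `Map.of()` is available); note that `Map.of()` does not preserve insertion order, which appears incidental here since the two key classes are unrelated:

   ```java
   // Sketch only: replaces the static initializer with an immutable Map.of().
   // Map.of() does not keep insertion order; the lookup streams over entrySet()
   // and the two key classes are unrelated, so ordering should not matter.
   private static final Map<Class<? extends AbstractAnalysisFactory>, Map<String, String>> CONFIGURATION_MAPPING =
           Map.of(
                   AbstractWordsFileFilterFactory.class, Map.of("words", "stopwords"),
                   MappingCharFilterFactory.class, Map.of("mapping", "mappings")
           );
   ```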



##########
oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticCustomAnalyzer.java:
##########
@@ -0,0 +1,254 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.plugins.index.elastic.index;
+
+import co.elastic.clients.elasticsearch._types.analysis.Analyzer;
+import co.elastic.clients.elasticsearch._types.analysis.CharFilterDefinition;
+import co.elastic.clients.elasticsearch._types.analysis.CustomAnalyzer;
+import co.elastic.clients.elasticsearch._types.analysis.TokenFilterDefinition;
+import co.elastic.clients.elasticsearch._types.analysis.TokenizerDefinition;
+import co.elastic.clients.elasticsearch.indices.IndexSettingsAnalysis;
+import co.elastic.clients.json.JsonData;
+import com.google.common.base.CaseFormat;
+import org.apache.commons.io.IOUtils;
+import org.apache.jackrabbit.JcrConstants;
+import org.apache.jackrabbit.oak.api.Blob;
+import org.apache.jackrabbit.oak.api.Tree;
+import org.apache.jackrabbit.oak.api.Type;
+import org.apache.jackrabbit.oak.plugins.index.search.FulltextIndexConstants;
+import org.apache.jackrabbit.oak.plugins.index.search.util.ConfigUtil;
+import org.apache.jackrabbit.oak.plugins.tree.factories.TreeFactory;
+import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry;
+import org.apache.jackrabbit.oak.spi.state.NodeState;
+import org.apache.jackrabbit.oak.spi.state.NodeStateUtils;
+import org.apache.lucene.analysis.charfilter.MappingCharFilterFactory;
+import org.apache.lucene.analysis.en.AbstractWordsFileFilterFactory;
+import org.apache.lucene.analysis.util.AbstractAnalysisFactory;
+import org.apache.lucene.analysis.util.CharFilterFactory;
+import org.apache.lucene.analysis.util.TokenFilterFactory;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.Reader;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.Spliterator;
+import java.util.Spliterators;
+import java.util.function.BiFunction;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.StreamSupport;
+
+/**
+ * Loads custom analysis index settings from a JCR NodeState. It also takes care of required transformations from lucene
+ * to elasticsearch configuration options.
+ */
+public class ElasticCustomAnalyzer {
+
+    private static final Logger LOG = LoggerFactory.getLogger(ElasticCustomAnalyzer.class);
+
+    private static final String ANALYZER_TYPE = "type";
+
+    private static final Set<String> IGNORE_PROP_NAMES = new HashSet<>(Arrays.asList(
+            AbstractAnalysisFactory.LUCENE_MATCH_VERSION_PARAM,
+            FulltextIndexConstants.ANL_CLASS,
+            FulltextIndexConstants.ANL_NAME,
+            JcrConstants.JCR_PRIMARYTYPE)
+    );
+
+    /*
+     * Mappings for lucene options not available anymore to supported elastic counterparts
+     */
+    private static final Map<Class<? extends AbstractAnalysisFactory>, Map<String, String>> CONFIGURATION_MAPPING;
+
+    static {
+        CONFIGURATION_MAPPING = new LinkedHashMap<>();
+        CONFIGURATION_MAPPING.put(AbstractWordsFileFilterFactory.class, Collections.singletonMap("words", "stopwords"));
+        CONFIGURATION_MAPPING.put(MappingCharFilterFactory.class, Collections.singletonMap("mapping", "mappings"));
+    }
+
+    @Nullable
+    public static IndexSettingsAnalysis.Builder buildCustomAnalyzers(NodeState state, String analyzerName) {
+        if (state != null) {
+            NodeState defaultAnalyzer = state.getChildNode(FulltextIndexConstants.ANL_DEFAULT);
+            if (defaultAnalyzer.exists()) {
+                IndexSettingsAnalysis.Builder builder = new IndexSettingsAnalysis.Builder();
+                Map<String, Object> analyzer = convertNodeState(defaultAnalyzer);
+                String builtIn = defaultAnalyzer.getString(FulltextIndexConstants.ANL_CLASS);
+                if (builtIn == null) {
+                    builtIn = defaultAnalyzer.getString(FulltextIndexConstants.ANL_NAME);
+                }
+                if (builtIn != null) {
+                    analyzer.put(ANALYZER_TYPE, normalize(builtIn));
+
+                    // content params, usually stop words
+                    for (ChildNodeEntry nodeEntry : defaultAnalyzer.getChildNodeEntries()) {
+                        try {
+                            analyzer.put(normalize(nodeEntry.getName()), loadContent(nodeEntry.getNodeState(), nodeEntry.getName()));
+                        } catch (IOException e) {
+                            throw new IllegalStateException("Unable to load content for node entry " + nodeEntry.getName(), e);
+                        }
+                    }
+
+                    builder.analyzer(analyzerName, new Analyzer(null, JsonData.of(analyzer)));
+                } else { // try to compose the analyzer
+                    builder.tokenizer("custom_tokenizer", tb ->
+                            tb.definition(loadTokenizer(defaultAnalyzer.getChildNode(FulltextIndexConstants.ANL_TOKENIZER))));
+
+                    LinkedHashMap<String, TokenFilterDefinition> tokenFilters = loadFilters(
+                            defaultAnalyzer.getChildNode(FulltextIndexConstants.ANL_FILTERS),
+                            TokenFilterFactory::lookupClass, TokenFilterDefinition::new
+                    );
+                    tokenFilters.forEach((key, value) -> builder.filter(key, fn -> fn.definition(value)));
+
+                    LinkedHashMap<String, CharFilterDefinition> charFilters = loadFilters(
+                            defaultAnalyzer.getChildNode(FulltextIndexConstants.ANL_CHAR_FILTERS),
+                            CharFilterFactory::lookupClass, CharFilterDefinition::new
+                    );
+                    charFilters.forEach((key, value) -> builder.charFilter(key, fn -> fn.definition(value)));
+
+                    builder.analyzer(analyzerName, bf -> bf.custom(CustomAnalyzer.of(cab ->
+                            cab.tokenizer("custom_tokenizer")
+                                    .filter(new ArrayList<>(tokenFilters.keySet()))
+                                    .charFilter(new ArrayList<>(charFilters.keySet()))
+                    )));
+                }
+                return builder;
+            }
+        }
+        return null;
+    }
+
+    @NotNull
+    private static TokenizerDefinition loadTokenizer(NodeState state) {
+        String name = normalize(Objects.requireNonNull(state.getString(FulltextIndexConstants.ANL_NAME)));
+        Map<String, Object> args = convertNodeState(state);
+        args.put(ANALYZER_TYPE, name);
+        return new TokenizerDefinition(name, JsonData.of(args));
+    }
+
+    private static <FD> LinkedHashMap<String, FD> loadFilters(NodeState state,
+                                                              Function<String, Class<? extends AbstractAnalysisFactory>> lookup,
+                                                              BiFunction<String, JsonData, FD> factory) {
+        LinkedHashMap<String, FD> filters = new LinkedHashMap<>();
+        int i = 0;
+        Tree tree = TreeFactory.createReadOnlyTree(state);
+        for (Tree t : tree.getChildren()) {
+            NodeState child = state.getChildNode(t.getName());
+            Class<? extends AbstractAnalysisFactory> tff = lookup.apply(t.getName());
+            String name;
+            try {
+                name = normalize((String) tff.getField("NAME").get(null));
+            } catch (Exception e) {
+                LOG.warn("unable to get the filter name using reflection, falling back to the normalized node name", e);
+                name = normalize(t.getName());
+            }
+            Optional<Map<String, String>> mappingOpt =
+                    CONFIGURATION_MAPPING.entrySet().stream().filter(k -> k.getKey().isAssignableFrom(tff)).map(Map.Entry::getValue).findFirst();
+            Map<String, Object> args = convertNodeState(child, mappingOpt.orElseGet(Collections::emptyMap));
+
+            args.put(ANALYZER_TYPE, name);
+
+            filters.put(name + "_" + i++, factory.apply(name, JsonData.of(args)));

Review Comment:
   Nitpick: I find the usage of the i++ increment operator inside the string concatenation confusing and error-prone. Suggest moving the increment to a separate line after the `.put` call. 
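
   A minimal sketch of that rearrangement (same behavior, with the increment made explicit):

   ```java
   // Sketch: the key stays readable and the side effect is no longer buried
   // inside the string concatenation.
   filters.put(name + "_" + i, factory.apply(name, JsonData.of(args)));
   i++;
   ```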



##########
oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticCustomAnalyzer.java:
##########
@@ -0,0 +1,254 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.plugins.index.elastic.index;
+
+import co.elastic.clients.elasticsearch._types.analysis.Analyzer;
+import co.elastic.clients.elasticsearch._types.analysis.CharFilterDefinition;
+import co.elastic.clients.elasticsearch._types.analysis.CustomAnalyzer;
+import co.elastic.clients.elasticsearch._types.analysis.TokenFilterDefinition;
+import co.elastic.clients.elasticsearch._types.analysis.TokenizerDefinition;
+import co.elastic.clients.elasticsearch.indices.IndexSettingsAnalysis;
+import co.elastic.clients.json.JsonData;
+import com.google.common.base.CaseFormat;
+import org.apache.commons.io.IOUtils;
+import org.apache.jackrabbit.JcrConstants;
+import org.apache.jackrabbit.oak.api.Blob;
+import org.apache.jackrabbit.oak.api.Tree;
+import org.apache.jackrabbit.oak.api.Type;
+import org.apache.jackrabbit.oak.plugins.index.search.FulltextIndexConstants;
+import org.apache.jackrabbit.oak.plugins.index.search.util.ConfigUtil;
+import org.apache.jackrabbit.oak.plugins.tree.factories.TreeFactory;
+import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry;
+import org.apache.jackrabbit.oak.spi.state.NodeState;
+import org.apache.jackrabbit.oak.spi.state.NodeStateUtils;
+import org.apache.lucene.analysis.charfilter.MappingCharFilterFactory;
+import org.apache.lucene.analysis.en.AbstractWordsFileFilterFactory;
+import org.apache.lucene.analysis.util.AbstractAnalysisFactory;
+import org.apache.lucene.analysis.util.CharFilterFactory;
+import org.apache.lucene.analysis.util.TokenFilterFactory;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.Reader;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.Spliterator;
+import java.util.Spliterators;
+import java.util.function.BiFunction;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.StreamSupport;
+
+/**
+ * Loads custom analysis index settings from a JCR NodeState. It also takes care of required transformations from lucene
+ * to elasticsearch configuration options.
+ */
+public class ElasticCustomAnalyzer {
+
+    private static final Logger LOG = LoggerFactory.getLogger(ElasticCustomAnalyzer.class);
+
+    private static final String ANALYZER_TYPE = "type";
+
+    private static final Set<String> IGNORE_PROP_NAMES = new HashSet<>(Arrays.asList(
+            AbstractAnalysisFactory.LUCENE_MATCH_VERSION_PARAM,
+            FulltextIndexConstants.ANL_CLASS,
+            FulltextIndexConstants.ANL_NAME,
+            JcrConstants.JCR_PRIMARYTYPE)
+    );
+
+    /*
+     * Mappings for lucene options not available anymore to supported elastic counterparts
+     */
+    private static final Map<Class<? extends AbstractAnalysisFactory>, Map<String, String>> CONFIGURATION_MAPPING;
+
+    static {
+        CONFIGURATION_MAPPING = new LinkedHashMap<>();
+        CONFIGURATION_MAPPING.put(AbstractWordsFileFilterFactory.class, Collections.singletonMap("words", "stopwords"));
+        CONFIGURATION_MAPPING.put(MappingCharFilterFactory.class, Collections.singletonMap("mapping", "mappings"));
+    }
+
+    @Nullable
+    public static IndexSettingsAnalysis.Builder buildCustomAnalyzers(NodeState state, String analyzerName) {
+        if (state != null) {
+            NodeState defaultAnalyzer = state.getChildNode(FulltextIndexConstants.ANL_DEFAULT);
+            if (defaultAnalyzer.exists()) {
+                IndexSettingsAnalysis.Builder builder = new IndexSettingsAnalysis.Builder();
+                Map<String, Object> analyzer = convertNodeState(defaultAnalyzer);
+                String builtIn = defaultAnalyzer.getString(FulltextIndexConstants.ANL_CLASS);
+                if (builtIn == null) {
+                    builtIn = defaultAnalyzer.getString(FulltextIndexConstants.ANL_NAME);
+                }
+                if (builtIn != null) {
+                    analyzer.put(ANALYZER_TYPE, normalize(builtIn));
+
+                    // content params, usually stop words
+                    for (ChildNodeEntry nodeEntry : defaultAnalyzer.getChildNodeEntries()) {
+                        try {
+                            analyzer.put(normalize(nodeEntry.getName()), loadContent(nodeEntry.getNodeState(), nodeEntry.getName()));
+                        } catch (IOException e) {
+                            throw new IllegalStateException("Unable to load content for node entry " + nodeEntry.getName(), e);
+                        }
+                    }
+
+                    builder.analyzer(analyzerName, new Analyzer(null, JsonData.of(analyzer)));
+                } else { // try to compose the analyzer
+                    builder.tokenizer("custom_tokenizer", tb ->
+                            tb.definition(loadTokenizer(defaultAnalyzer.getChildNode(FulltextIndexConstants.ANL_TOKENIZER))));
+
+                    LinkedHashMap<String, TokenFilterDefinition> tokenFilters = loadFilters(
+                            defaultAnalyzer.getChildNode(FulltextIndexConstants.ANL_FILTERS),
+                            TokenFilterFactory::lookupClass, TokenFilterDefinition::new
+                    );
+                    tokenFilters.forEach((key, value) -> builder.filter(key, fn -> fn.definition(value)));
+
+                    LinkedHashMap<String, CharFilterDefinition> charFilters = loadFilters(
+                            defaultAnalyzer.getChildNode(FulltextIndexConstants.ANL_CHAR_FILTERS),
+                            CharFilterFactory::lookupClass, CharFilterDefinition::new
+                    );
+                    charFilters.forEach((key, value) -> builder.charFilter(key, fn -> fn.definition(value)));
+
+                    builder.analyzer(analyzerName, bf -> bf.custom(CustomAnalyzer.of(cab ->
+                            cab.tokenizer("custom_tokenizer")
+                                    .filter(new ArrayList<>(tokenFilters.keySet()))
+                                    .charFilter(new ArrayList<>(charFilters.keySet()))
+                    )));
+                }
+                return builder;
+            }
+        }
+        return null;
+    }
+
+    @NotNull
+    private static TokenizerDefinition loadTokenizer(NodeState state) {
+        String name = normalize(Objects.requireNonNull(state.getString(FulltextIndexConstants.ANL_NAME)));
+        Map<String, Object> args = convertNodeState(state);
+        args.put(ANALYZER_TYPE, name);
+        return new TokenizerDefinition(name, JsonData.of(args));
+    }
+
+    private static <FD> LinkedHashMap<String, FD> loadFilters(NodeState state,
+                                                              Function<String, Class<? extends AbstractAnalysisFactory>> lookup,
+                                                              BiFunction<String, JsonData, FD> factory) {
+        LinkedHashMap<String, FD> filters = new LinkedHashMap<>();
+        int i = 0;
+        Tree tree = TreeFactory.createReadOnlyTree(state);
+        for (Tree t : tree.getChildren()) {
+            NodeState child = state.getChildNode(t.getName());
+            Class<? extends AbstractAnalysisFactory> tff = lookup.apply(t.getName());
+            String name;
+            try {
+                name = normalize((String) tff.getField("NAME").get(null));
+            } catch (Exception e) {
+                LOG.warn("unable to get the filter name using reflection, falling back to the normalized node name", e);
+                name = normalize(t.getName());
+            }
+            Optional<Map<String, String>> mappingOpt =
+                    CONFIGURATION_MAPPING.entrySet().stream().filter(k -> k.getKey().isAssignableFrom(tff)).map(Map.Entry::getValue).findFirst();

Review Comment:
   Break up this line to make it more legible.
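
   For instance, one stream operation per line (the same expression, reformatted only):

   ```java
   Optional<Map<String, String>> mappingOpt = CONFIGURATION_MAPPING.entrySet().stream()
           .filter(k -> k.getKey().isAssignableFrom(tff))
           .map(Map.Entry::getValue)
           .findFirst();
   ```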



##########
oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticCustomAnalyzer.java:
##########
@@ -0,0 +1,254 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.plugins.index.elastic.index;
+
+import co.elastic.clients.elasticsearch._types.analysis.Analyzer;
+import co.elastic.clients.elasticsearch._types.analysis.CharFilterDefinition;
+import co.elastic.clients.elasticsearch._types.analysis.CustomAnalyzer;
+import co.elastic.clients.elasticsearch._types.analysis.TokenFilterDefinition;
+import co.elastic.clients.elasticsearch._types.analysis.TokenizerDefinition;
+import co.elastic.clients.elasticsearch.indices.IndexSettingsAnalysis;
+import co.elastic.clients.json.JsonData;
+import com.google.common.base.CaseFormat;
+import org.apache.commons.io.IOUtils;
+import org.apache.jackrabbit.JcrConstants;
+import org.apache.jackrabbit.oak.api.Blob;
+import org.apache.jackrabbit.oak.api.Tree;
+import org.apache.jackrabbit.oak.api.Type;
+import org.apache.jackrabbit.oak.plugins.index.search.FulltextIndexConstants;
+import org.apache.jackrabbit.oak.plugins.index.search.util.ConfigUtil;
+import org.apache.jackrabbit.oak.plugins.tree.factories.TreeFactory;
+import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry;
+import org.apache.jackrabbit.oak.spi.state.NodeState;
+import org.apache.jackrabbit.oak.spi.state.NodeStateUtils;
+import org.apache.lucene.analysis.charfilter.MappingCharFilterFactory;
+import org.apache.lucene.analysis.en.AbstractWordsFileFilterFactory;
+import org.apache.lucene.analysis.util.AbstractAnalysisFactory;
+import org.apache.lucene.analysis.util.CharFilterFactory;
+import org.apache.lucene.analysis.util.TokenFilterFactory;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.Reader;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.Spliterator;
+import java.util.Spliterators;
+import java.util.function.BiFunction;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.StreamSupport;
+
+/**
+ * Loads custom analysis index settings from a JCR NodeState. It also takes care of required transformations from lucene
+ * to elasticsearch configuration options.
+ */
+public class ElasticCustomAnalyzer {
+
+    private static final Logger LOG = LoggerFactory.getLogger(ElasticCustomAnalyzer.class);
+
+    private static final String ANALYZER_TYPE = "type";
+
+    private static final Set<String> IGNORE_PROP_NAMES = new HashSet<>(Arrays.asList(
+            AbstractAnalysisFactory.LUCENE_MATCH_VERSION_PARAM,
+            FulltextIndexConstants.ANL_CLASS,
+            FulltextIndexConstants.ANL_NAME,
+            JcrConstants.JCR_PRIMARYTYPE)
+    );
+
+    /*
+     * Mappings for lucene options not available anymore to supported elastic counterparts
+     */
+    private static final Map<Class<? extends AbstractAnalysisFactory>, Map<String, String>> CONFIGURATION_MAPPING;
+
+    static {
+        CONFIGURATION_MAPPING = new LinkedHashMap<>();
+        CONFIGURATION_MAPPING.put(AbstractWordsFileFilterFactory.class, Collections.singletonMap("words", "stopwords"));
+        CONFIGURATION_MAPPING.put(MappingCharFilterFactory.class, Collections.singletonMap("mapping", "mappings"));
+    }
+
+    @Nullable
+    public static IndexSettingsAnalysis.Builder buildCustomAnalyzers(NodeState state, String analyzerName) {
+        if (state != null) {
+            NodeState defaultAnalyzer = state.getChildNode(FulltextIndexConstants.ANL_DEFAULT);
+            if (defaultAnalyzer.exists()) {
+                IndexSettingsAnalysis.Builder builder = new IndexSettingsAnalysis.Builder();
+                Map<String, Object> analyzer = convertNodeState(defaultAnalyzer);
+                String builtIn = defaultAnalyzer.getString(FulltextIndexConstants.ANL_CLASS);
+                if (builtIn == null) {
+                    builtIn = defaultAnalyzer.getString(FulltextIndexConstants.ANL_NAME);
+                }
+                if (builtIn != null) {
+                    analyzer.put(ANALYZER_TYPE, normalize(builtIn));
+
+                    // content params, usually stop words
+                    for (ChildNodeEntry nodeEntry : defaultAnalyzer.getChildNodeEntries()) {
+                        try {
+                            analyzer.put(normalize(nodeEntry.getName()), loadContent(nodeEntry.getNodeState(), nodeEntry.getName()));
+                        } catch (IOException e) {
+                            throw new IllegalStateException("Unable to load content for node entry " + nodeEntry.getName(), e);
+                        }
+                    }
+
+                    builder.analyzer(analyzerName, new Analyzer(null, JsonData.of(analyzer)));
+                } else { // try to compose the analyzer
+                    builder.tokenizer("custom_tokenizer", tb ->
+                            tb.definition(loadTokenizer(defaultAnalyzer.getChildNode(FulltextIndexConstants.ANL_TOKENIZER))));
+
+                    LinkedHashMap<String, TokenFilterDefinition> tokenFilters = loadFilters(
+                            defaultAnalyzer.getChildNode(FulltextIndexConstants.ANL_FILTERS),
+                            TokenFilterFactory::lookupClass, TokenFilterDefinition::new
+                    );
+                    tokenFilters.forEach((key, value) -> builder.filter(key, fn -> fn.definition(value)));
+
+                    LinkedHashMap<String, CharFilterDefinition> charFilters = loadFilters(
+                            defaultAnalyzer.getChildNode(FulltextIndexConstants.ANL_CHAR_FILTERS),
+                            CharFilterFactory::lookupClass, CharFilterDefinition::new
+                    );
+                    charFilters.forEach((key, value) -> builder.charFilter(key, fn -> fn.definition(value)));
+
+                    builder.analyzer(analyzerName, bf -> bf.custom(CustomAnalyzer.of(cab ->
+                            cab.tokenizer("custom_tokenizer")
+                                    .filter(new ArrayList<>(tokenFilters.keySet()))
+                                    .charFilter(new ArrayList<>(charFilters.keySet()))
+                    )));
+                }
+                return builder;
+            }
+        }
+        return null;
+    }
+
+    @NotNull
+    private static TokenizerDefinition loadTokenizer(NodeState state) {
+        String name = normalize(Objects.requireNonNull(state.getString(FulltextIndexConstants.ANL_NAME)));
+        Map<String, Object> args = convertNodeState(state);
+        args.put(ANALYZER_TYPE, name);
+        return new TokenizerDefinition(name, JsonData.of(args));
+    }
+
+    private static <FD> LinkedHashMap<String, FD> loadFilters(NodeState state,
+                                                              Function<String, Class<? extends AbstractAnalysisFactory>> lookup,
+                                                              BiFunction<String, JsonData, FD> factory) {
+        LinkedHashMap<String, FD> filters = new LinkedHashMap<>();
+        int i = 0;
+        Tree tree = TreeFactory.createReadOnlyTree(state);
+        for (Tree t : tree.getChildren()) {
+            NodeState child = state.getChildNode(t.getName());
+            Class<? extends AbstractAnalysisFactory> tff = lookup.apply(t.getName());
+            String name;
+            try {
+                name = normalize((String) tff.getField("NAME").get(null));
+            } catch (Exception e) {
+                LOG.warn("unable to get the filter name using reflection. Try using the normalized node name", e);
+                name = normalize(t.getName());
+            }
+            Optional<Map<String, String>> mappingOpt =
+                    CONFIGURATION_MAPPING.entrySet().stream().filter(k -> k.getKey().isAssignableFrom(tff)).map(Map.Entry::getValue).findFirst();
+            Map<String, Object> args = convertNodeState(child, mappingOpt.orElseGet(Collections::emptyMap));

Review Comment:
   Move the `orElseGet` so that it is the last operation of the chain on the line above. I think it conveys the intention of the code better.
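   A minimal sketch of what that reordering could look like (same names as in the quoted diff, untested):
   ```java
   // resolve the Optional at the end of the chain so `args` takes a plain Map
   Map<String, Object> args = convertNodeState(child, CONFIGURATION_MAPPING.entrySet().stream()
           .filter(k -> k.getKey().isAssignableFrom(tff))
           .map(Map.Entry::getValue)
           .findFirst()
           .orElseGet(Collections::emptyMap));
   ```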



##########
oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticCustomAnalyzer.java:
##########
@@ -0,0 +1,254 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.plugins.index.elastic.index;
+
+import co.elastic.clients.elasticsearch._types.analysis.Analyzer;
+import co.elastic.clients.elasticsearch._types.analysis.CharFilterDefinition;
+import co.elastic.clients.elasticsearch._types.analysis.CustomAnalyzer;
+import co.elastic.clients.elasticsearch._types.analysis.TokenFilterDefinition;
+import co.elastic.clients.elasticsearch._types.analysis.TokenizerDefinition;
+import co.elastic.clients.elasticsearch.indices.IndexSettingsAnalysis;
+import co.elastic.clients.json.JsonData;
+import com.google.common.base.CaseFormat;
+import org.apache.commons.io.IOUtils;
+import org.apache.jackrabbit.JcrConstants;
+import org.apache.jackrabbit.oak.api.Blob;
+import org.apache.jackrabbit.oak.api.Tree;
+import org.apache.jackrabbit.oak.api.Type;
+import org.apache.jackrabbit.oak.plugins.index.search.FulltextIndexConstants;
+import org.apache.jackrabbit.oak.plugins.index.search.util.ConfigUtil;
+import org.apache.jackrabbit.oak.plugins.tree.factories.TreeFactory;
+import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry;
+import org.apache.jackrabbit.oak.spi.state.NodeState;
+import org.apache.jackrabbit.oak.spi.state.NodeStateUtils;
+import org.apache.lucene.analysis.charfilter.MappingCharFilterFactory;
+import org.apache.lucene.analysis.en.AbstractWordsFileFilterFactory;
+import org.apache.lucene.analysis.util.AbstractAnalysisFactory;
+import org.apache.lucene.analysis.util.CharFilterFactory;
+import org.apache.lucene.analysis.util.TokenFilterFactory;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.Reader;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.Spliterator;
+import java.util.Spliterators;
+import java.util.function.BiFunction;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.StreamSupport;
+
+/**
+ * Loads custom analysis index settings from a JCR NodeState. It also takes care of required transformations from lucene
+ * to elasticsearch configuration options.
+ */
+public class ElasticCustomAnalyzer {
+
+    private static final Logger LOG = LoggerFactory.getLogger(ElasticCustomAnalyzer.class);
+
+    private static final String ANALYZER_TYPE = "type";
+
+    private static final Set<String> IGNORE_PROP_NAMES = new HashSet<>(Arrays.asList(
+            AbstractAnalysisFactory.LUCENE_MATCH_VERSION_PARAM,
+            FulltextIndexConstants.ANL_CLASS,
+            FulltextIndexConstants.ANL_NAME,
+            JcrConstants.JCR_PRIMARYTYPE)
+    );
+
+    /*
+     * Mappings for lucene options not available anymore to supported elastic counterparts
+     */
+    private static final Map<Class<? extends AbstractAnalysisFactory>, Map<String, String>> CONFIGURATION_MAPPING;
+
+    static {
+        CONFIGURATION_MAPPING = new LinkedHashMap<>();
+        CONFIGURATION_MAPPING.put(AbstractWordsFileFilterFactory.class, Collections.singletonMap("words", "stopwords"));
+        CONFIGURATION_MAPPING.put(MappingCharFilterFactory.class, Collections.singletonMap("mapping", "mappings"));
+    }
+
+    @Nullable
+    public static IndexSettingsAnalysis.Builder buildCustomAnalyzers(NodeState state, String analyzerName) {
+        if (state != null) {
+            NodeState defaultAnalyzer = state.getChildNode(FulltextIndexConstants.ANL_DEFAULT);
+            if (defaultAnalyzer.exists()) {
+                IndexSettingsAnalysis.Builder builder = new IndexSettingsAnalysis.Builder();
+                Map<String, Object> analyzer = convertNodeState(defaultAnalyzer);
+                String builtIn = defaultAnalyzer.getString(FulltextIndexConstants.ANL_CLASS);
+                if (builtIn == null) {
+                    builtIn = defaultAnalyzer.getString(FulltextIndexConstants.ANL_NAME);
+                }
+                if (builtIn != null) {
+                    analyzer.put(ANALYZER_TYPE, normalize(builtIn));
+
+                    // content params, usually stop words
+                    for (ChildNodeEntry nodeEntry : defaultAnalyzer.getChildNodeEntries()) {
+                        try {
+                            analyzer.put(normalize(nodeEntry.getName()), loadContent(nodeEntry.getNodeState(), nodeEntry.getName()));
+                        } catch (IOException e) {
+                            throw new IllegalStateException("Unable to load content for node entry " + nodeEntry.getName(), e);
+                        }
+                    }
+
+                    builder.analyzer(analyzerName, new Analyzer(null, JsonData.of(analyzer)));
+                } else { // try to compose the analyzer
+                    builder.tokenizer("custom_tokenizer", tb ->
+                            tb.definition(loadTokenizer(defaultAnalyzer.getChildNode(FulltextIndexConstants.ANL_TOKENIZER))));
+
+                    LinkedHashMap<String, TokenFilterDefinition> tokenFilters = loadFilters(
+                            defaultAnalyzer.getChildNode(FulltextIndexConstants.ANL_FILTERS),
+                            TokenFilterFactory::lookupClass, TokenFilterDefinition::new
+                    );
+                    tokenFilters.forEach((key, value) -> builder.filter(key, fn -> fn.definition(value)));
+
+                    LinkedHashMap<String, CharFilterDefinition> charFilters = loadFilters(
+                            defaultAnalyzer.getChildNode(FulltextIndexConstants.ANL_CHAR_FILTERS),
+                            CharFilterFactory::lookupClass, CharFilterDefinition::new
+                    );
+                    charFilters.forEach((key, value) -> builder.charFilter(key, fn -> fn.definition(value)));
+
+                    builder.analyzer(analyzerName, bf -> bf.custom(CustomAnalyzer.of(cab ->
+                            cab.tokenizer("custom_tokenizer")
+                                    .filter(new ArrayList<>(tokenFilters.keySet()))
+                                    .charFilter(new ArrayList<>(charFilters.keySet()))
+                    )));
+                }
+                return builder;
+            }
+        }
+        return null;
+    }
+
+    @NotNull
+    private static TokenizerDefinition loadTokenizer(NodeState state) {
+        String name = normalize(Objects.requireNonNull(state.getString(FulltextIndexConstants.ANL_NAME)));
+        Map<String, Object> args = convertNodeState(state);
+        args.put(ANALYZER_TYPE, name);
+        return new TokenizerDefinition(name, JsonData.of(args));
+    }
+
+    private static <FD> LinkedHashMap<String, FD> loadFilters(NodeState state,
+                                                              Function<String, Class<? extends AbstractAnalysisFactory>> lookup,
+                                                              BiFunction<String, JsonData, FD> factory) {
+        LinkedHashMap<String, FD> filters = new LinkedHashMap<>();
+        int i = 0;
+        Tree tree = TreeFactory.createReadOnlyTree(state);
+        for (Tree t : tree.getChildren()) {
+            NodeState child = state.getChildNode(t.getName());
+            Class<? extends AbstractAnalysisFactory> tff = lookup.apply(t.getName());
+            String name;
+            try {
+                name = normalize((String) tff.getField("NAME").get(null));
+            } catch (Exception e) {
+                LOG.warn("unable to get the filter name using reflection. Try using the normalized node name", e);
+                name = normalize(t.getName());
+            }
+            Optional<Map<String, String>> mappingOpt =
+                    CONFIGURATION_MAPPING.entrySet().stream().filter(k -> k.getKey().isAssignableFrom(tff)).map(Map.Entry::getValue).findFirst();
+            Map<String, Object> args = convertNodeState(child, mappingOpt.orElseGet(Collections::emptyMap));
+
+            args.put(ANALYZER_TYPE, name);
+
+            filters.put(name + "_" + i++, factory.apply(name, JsonData.of(args)));
+        }
+        return filters;
+    }
+
+    private static List<String> loadContent(NodeState file, String name) throws IOException {
+        List<String> result = new ArrayList<>();
+        Blob blob = ConfigUtil.getBlob(file, name);
+        Reader content = null;
+        try {
+            content = new InputStreamReader(Objects.requireNonNull(blob).getNewStream(), StandardCharsets.UTF_8);
+            BufferedReader br = null;
+            try {
+                br = new BufferedReader(content);
+                String word;
+                while ((word = br.readLine()) != null) {
+                    result.add(word.trim());
+                }
+            } finally {
+                IOUtils.close(br);
+            }
+            return result;
+        } finally {
+            IOUtils.close(content);
+        }

Review Comment:
   Any reason for not using try-with-resources?
   
   `BufferedReader.lines()` could be a simpler alternative to the while loop.
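   For instance, a sketch of `loadContent` along those lines (same helpers as in the quoted diff, untested):
   ```java
   private static List<String> loadContent(NodeState file, String name) throws IOException {
       Blob blob = Objects.requireNonNull(ConfigUtil.getBlob(file, name));
       // try-with-resources closes the reader on both normal and exceptional exit;
       // note that IO errors inside lines() surface as UncheckedIOException
       try (BufferedReader br = new BufferedReader(
               new InputStreamReader(blob.getNewStream(), StandardCharsets.UTF_8))) {
           return br.lines().map(String::trim).collect(Collectors.toList());
       }
   }
   ```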



##########
oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticCustomAnalyzer.java:
##########
@@ -0,0 +1,254 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.plugins.index.elastic.index;
+
+import co.elastic.clients.elasticsearch._types.analysis.Analyzer;
+import co.elastic.clients.elasticsearch._types.analysis.CharFilterDefinition;
+import co.elastic.clients.elasticsearch._types.analysis.CustomAnalyzer;
+import co.elastic.clients.elasticsearch._types.analysis.TokenFilterDefinition;
+import co.elastic.clients.elasticsearch._types.analysis.TokenizerDefinition;
+import co.elastic.clients.elasticsearch.indices.IndexSettingsAnalysis;
+import co.elastic.clients.json.JsonData;
+import com.google.common.base.CaseFormat;
+import org.apache.commons.io.IOUtils;
+import org.apache.jackrabbit.JcrConstants;
+import org.apache.jackrabbit.oak.api.Blob;
+import org.apache.jackrabbit.oak.api.Tree;
+import org.apache.jackrabbit.oak.api.Type;
+import org.apache.jackrabbit.oak.plugins.index.search.FulltextIndexConstants;
+import org.apache.jackrabbit.oak.plugins.index.search.util.ConfigUtil;
+import org.apache.jackrabbit.oak.plugins.tree.factories.TreeFactory;
+import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry;
+import org.apache.jackrabbit.oak.spi.state.NodeState;
+import org.apache.jackrabbit.oak.spi.state.NodeStateUtils;
+import org.apache.lucene.analysis.charfilter.MappingCharFilterFactory;
+import org.apache.lucene.analysis.en.AbstractWordsFileFilterFactory;
+import org.apache.lucene.analysis.util.AbstractAnalysisFactory;
+import org.apache.lucene.analysis.util.CharFilterFactory;
+import org.apache.lucene.analysis.util.TokenFilterFactory;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.Reader;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.Spliterator;
+import java.util.Spliterators;
+import java.util.function.BiFunction;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.StreamSupport;
+
+/**
+ * Loads custom analysis index settings from a JCR NodeState. It also takes care of required transformations from lucene
+ * to elasticsearch configuration options.
+ */
+public class ElasticCustomAnalyzer {
+
+    private static final Logger LOG = LoggerFactory.getLogger(ElasticCustomAnalyzer.class);
+
+    private static final String ANALYZER_TYPE = "type";
+
+    private static final Set<String> IGNORE_PROP_NAMES = new HashSet<>(Arrays.asList(
+            AbstractAnalysisFactory.LUCENE_MATCH_VERSION_PARAM,
+            FulltextIndexConstants.ANL_CLASS,
+            FulltextIndexConstants.ANL_NAME,
+            JcrConstants.JCR_PRIMARYTYPE)
+    );
+
+    /*
+     * Mappings for lucene options not available anymore to supported elastic counterparts
+     */
+    private static final Map<Class<? extends AbstractAnalysisFactory>, Map<String, String>> CONFIGURATION_MAPPING;
+
+    static {
+        CONFIGURATION_MAPPING = new LinkedHashMap<>();
+        CONFIGURATION_MAPPING.put(AbstractWordsFileFilterFactory.class, Collections.singletonMap("words", "stopwords"));
+        CONFIGURATION_MAPPING.put(MappingCharFilterFactory.class, Collections.singletonMap("mapping", "mappings"));
+    }
+
+    @Nullable
+    public static IndexSettingsAnalysis.Builder buildCustomAnalyzers(NodeState state, String analyzerName) {
+        if (state != null) {
+            NodeState defaultAnalyzer = state.getChildNode(FulltextIndexConstants.ANL_DEFAULT);
+            if (defaultAnalyzer.exists()) {
+                IndexSettingsAnalysis.Builder builder = new IndexSettingsAnalysis.Builder();
+                Map<String, Object> analyzer = convertNodeState(defaultAnalyzer);
+                String builtIn = defaultAnalyzer.getString(FulltextIndexConstants.ANL_CLASS);
+                if (builtIn == null) {
+                    builtIn = defaultAnalyzer.getString(FulltextIndexConstants.ANL_NAME);
+                }
+                if (builtIn != null) {
+                    analyzer.put(ANALYZER_TYPE, normalize(builtIn));
+
+                    // content params, usually stop words
+                    for (ChildNodeEntry nodeEntry : defaultAnalyzer.getChildNodeEntries()) {
+                        try {
+                            analyzer.put(normalize(nodeEntry.getName()), loadContent(nodeEntry.getNodeState(), nodeEntry.getName()));
+                        } catch (IOException e) {
+                            throw new IllegalStateException("Unable to load content for node entry " + nodeEntry.getName(), e);
+                        }
+                    }
+
+                    builder.analyzer(analyzerName, new Analyzer(null, JsonData.of(analyzer)));
+                } else { // try to compose the analyzer
+                    builder.tokenizer("custom_tokenizer", tb ->
+                            tb.definition(loadTokenizer(defaultAnalyzer.getChildNode(FulltextIndexConstants.ANL_TOKENIZER))));
+
+                    LinkedHashMap<String, TokenFilterDefinition> tokenFilters = loadFilters(
+                            defaultAnalyzer.getChildNode(FulltextIndexConstants.ANL_FILTERS),
+                            TokenFilterFactory::lookupClass, TokenFilterDefinition::new
+                    );
+                    tokenFilters.forEach((key, value) -> builder.filter(key, fn -> fn.definition(value)));
+
+                    LinkedHashMap<String, CharFilterDefinition> charFilters = loadFilters(
+                            defaultAnalyzer.getChildNode(FulltextIndexConstants.ANL_CHAR_FILTERS),
+                            CharFilterFactory::lookupClass, CharFilterDefinition::new
+                    );
+                    charFilters.forEach((key, value) -> builder.charFilter(key, fn -> fn.definition(value)));
+
+                    builder.analyzer(analyzerName, bf -> bf.custom(CustomAnalyzer.of(cab ->
+                            cab.tokenizer("custom_tokenizer")
+                                    .filter(new ArrayList<>(tokenFilters.keySet()))
+                                    .charFilter(new ArrayList<>(charFilters.keySet()))
+                    )));
+                }
+                return builder;
+            }
+        }
+        return null;
+    }
+
+    @NotNull
+    private static TokenizerDefinition loadTokenizer(NodeState state) {
+        String name = normalize(Objects.requireNonNull(state.getString(FulltextIndexConstants.ANL_NAME)));
+        Map<String, Object> args = convertNodeState(state);
+        args.put(ANALYZER_TYPE, name);
+        return new TokenizerDefinition(name, JsonData.of(args));
+    }
+
+    private static <FD> LinkedHashMap<String, FD> loadFilters(NodeState state,
+                                                              Function<String, Class<? extends AbstractAnalysisFactory>> lookup,
+                                                              BiFunction<String, JsonData, FD> factory) {
+        LinkedHashMap<String, FD> filters = new LinkedHashMap<>();
+        int i = 0;
+        Tree tree = TreeFactory.createReadOnlyTree(state);
+        for (Tree t : tree.getChildren()) {
+            NodeState child = state.getChildNode(t.getName());
+            Class<? extends AbstractAnalysisFactory> tff = lookup.apply(t.getName());
+            String name;
+            try {
+                name = normalize((String) tff.getField("NAME").get(null));
+            } catch (Exception e) {
+                LOG.warn("unable to get the filter name using reflection. Try using the normalized node name", e);
+                name = normalize(t.getName());
+            }
+            Optional<Map<String, String>> mappingOpt =
+                    CONFIGURATION_MAPPING.entrySet().stream().filter(k -> k.getKey().isAssignableFrom(tff)).map(Map.Entry::getValue).findFirst();
+            Map<String, Object> args = convertNodeState(child, mappingOpt.orElseGet(Collections::emptyMap));
+
+            args.put(ANALYZER_TYPE, name);
+
+            filters.put(name + "_" + i++, factory.apply(name, JsonData.of(args)));
+        }
+        return filters;
+    }
+
+    private static List<String> loadContent(NodeState file, String name) throws IOException {
+        List<String> result = new ArrayList<>();
+        Blob blob = ConfigUtil.getBlob(file, name);
+        Reader content = null;
+        try {
+            content = new InputStreamReader(Objects.requireNonNull(blob).getNewStream(), StandardCharsets.UTF_8);
+            BufferedReader br = null;
+            try {
+                br = new BufferedReader(content);
+                String word;
+                while ((word = br.readLine()) != null) {
+                    result.add(word.trim());
+                }
+            } finally {
+                IOUtils.close(br);
+            }
+            return result;
+        } finally {
+            IOUtils.close(content);
+        }
+    }
+
+    /**
+     * Normalizes one of the following values:
+     * - lucene class (eg: org.apache.lucene.analysis.en.EnglishAnalyzer -> english)
+     * - lucene name (eg: Standard -> standard)
+     * into the elasticsearch compatible value
+     */
+    private static String normalize(String value) {
+        // this might be a full class, let's tokenize the value
+        String[] anlClassTokens = value.split("\\.");
+        // and take the last part
+        String name = anlClassTokens[anlClassTokens.length - 1];
+        // all options in elastic are in snake case
+        name = CaseFormat.UPPER_CAMEL.to(CaseFormat.LOWER_UNDERSCORE, name);
+        // if it ends with analyzer we need to get rid of it
+        if (name.endsWith("_analyzer")) {
+            name = name.replace("_analyzer", "");

Review Comment:
   ```suggestion
          name = name.substring(0, name.length() - "_analyzer".length());
   ```
   It is safer: `replace` removes every occurrence, so it would misbehave if `name` contained `_analyzer` more than once.



##########
oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticIndexHelper.java:
##########
@@ -123,41 +124,48 @@ private static ObjectBuilder<IndexSettings> loadSettings(@NotNull IndexSettings.
         if (indexDefinition.getSimilarityProperties().size() > 0) {
             builder.otherSettings(ElasticIndexDefinition.ELASTIKNN, JsonData.of(true));
         }
+
+        // collect analyzer settings
+        IndexSettingsAnalysis.Builder analyzerBuilder =
+                ElasticCustomAnalyzer.buildCustomAnalyzers(indexDefinition.getAnalyzersNodeState(), "oak_analyzer");
+        if (analyzerBuilder == null) {
+            analyzerBuilder = new IndexSettingsAnalysis.Builder()
+                    .filter("oak_word_delimiter_graph_filter",

Review Comment:
   Create a constant for this string; it is used in other parts of the code.
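   For instance (hypothetical constant name):
   ```java
   private static final String OAK_WORD_DELIMITER_FILTER = "oak_word_delimiter_graph_filter";
   ```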



##########
oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticCustomAnalyzer.java:
##########
@@ -0,0 +1,254 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.plugins.index.elastic.index;
+
+import co.elastic.clients.elasticsearch._types.analysis.Analyzer;
+import co.elastic.clients.elasticsearch._types.analysis.CharFilterDefinition;
+import co.elastic.clients.elasticsearch._types.analysis.CustomAnalyzer;
+import co.elastic.clients.elasticsearch._types.analysis.TokenFilterDefinition;
+import co.elastic.clients.elasticsearch._types.analysis.TokenizerDefinition;
+import co.elastic.clients.elasticsearch.indices.IndexSettingsAnalysis;
+import co.elastic.clients.json.JsonData;
+import com.google.common.base.CaseFormat;
+import org.apache.commons.io.IOUtils;
+import org.apache.jackrabbit.JcrConstants;
+import org.apache.jackrabbit.oak.api.Blob;
+import org.apache.jackrabbit.oak.api.Tree;
+import org.apache.jackrabbit.oak.api.Type;
+import org.apache.jackrabbit.oak.plugins.index.search.FulltextIndexConstants;
+import org.apache.jackrabbit.oak.plugins.index.search.util.ConfigUtil;
+import org.apache.jackrabbit.oak.plugins.tree.factories.TreeFactory;
+import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry;
+import org.apache.jackrabbit.oak.spi.state.NodeState;
+import org.apache.jackrabbit.oak.spi.state.NodeStateUtils;
+import org.apache.lucene.analysis.charfilter.MappingCharFilterFactory;
+import org.apache.lucene.analysis.en.AbstractWordsFileFilterFactory;
+import org.apache.lucene.analysis.util.AbstractAnalysisFactory;
+import org.apache.lucene.analysis.util.CharFilterFactory;
+import org.apache.lucene.analysis.util.TokenFilterFactory;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.Reader;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.Spliterator;
+import java.util.Spliterators;
+import java.util.function.BiFunction;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.StreamSupport;
+
+/**
+ * Loads custom analysis index settings from a JCR NodeState. It also takes care of required transformations from lucene
+ * to elasticsearch configuration options.
+ */
+public class ElasticCustomAnalyzer {
+
+    private static final Logger LOG = LoggerFactory.getLogger(ElasticCustomAnalyzer.class);
+
+    private static final String ANALYZER_TYPE = "type";
+
+    private static final Set<String> IGNORE_PROP_NAMES = new HashSet<>(Arrays.asList(
+            AbstractAnalysisFactory.LUCENE_MATCH_VERSION_PARAM,
+            FulltextIndexConstants.ANL_CLASS,
+            FulltextIndexConstants.ANL_NAME,
+            JcrConstants.JCR_PRIMARYTYPE)
+    );
+
+    /*
+     * Mappings for lucene options not available anymore to supported elastic counterparts
+     */
+    private static final Map<Class<? extends AbstractAnalysisFactory>, Map<String, String>> CONFIGURATION_MAPPING;
+
+    static {
+        CONFIGURATION_MAPPING = new LinkedHashMap<>();
+        CONFIGURATION_MAPPING.put(AbstractWordsFileFilterFactory.class, Collections.singletonMap("words", "stopwords"));
+        CONFIGURATION_MAPPING.put(MappingCharFilterFactory.class, Collections.singletonMap("mapping", "mappings"));
+    }
+
+    @Nullable
+    public static IndexSettingsAnalysis.Builder buildCustomAnalyzers(NodeState state, String analyzerName) {
+        if (state != null) {
+            NodeState defaultAnalyzer = state.getChildNode(FulltextIndexConstants.ANL_DEFAULT);
+            if (defaultAnalyzer.exists()) {
+                IndexSettingsAnalysis.Builder builder = new IndexSettingsAnalysis.Builder();
+                Map<String, Object> analyzer = convertNodeState(defaultAnalyzer);
+                String builtIn = defaultAnalyzer.getString(FulltextIndexConstants.ANL_CLASS);
+                if (builtIn == null) {
+                    builtIn = defaultAnalyzer.getString(FulltextIndexConstants.ANL_NAME);
+                }
+                if (builtIn != null) {
+                    analyzer.put(ANALYZER_TYPE, normalize(builtIn));
+
+                    // content params, usually stop words
+                    for (ChildNodeEntry nodeEntry : defaultAnalyzer.getChildNodeEntries()) {
+                        try {
+                            analyzer.put(normalize(nodeEntry.getName()), loadContent(nodeEntry.getNodeState(), nodeEntry.getName()));
+                        } catch (IOException e) {
+                            throw new IllegalStateException("Unable to load content for node entry " + nodeEntry.getName(), e);
+                        }
+                    }
+
+                    builder.analyzer(analyzerName, new Analyzer(null, JsonData.of(analyzer)));
+                } else { // try to compose the analyzer
+                    builder.tokenizer("custom_tokenizer", tb ->
+                            tb.definition(loadTokenizer(defaultAnalyzer.getChildNode(FulltextIndexConstants.ANL_TOKENIZER))));
+
+                    LinkedHashMap<String, TokenFilterDefinition> tokenFilters = loadFilters(
+                            defaultAnalyzer.getChildNode(FulltextIndexConstants.ANL_FILTERS),
+                            TokenFilterFactory::lookupClass, TokenFilterDefinition::new
+                    );
+                    tokenFilters.forEach((key, value) -> builder.filter(key, fn -> fn.definition(value)));
+
+                    LinkedHashMap<String, CharFilterDefinition> charFilters = loadFilters(
+                            defaultAnalyzer.getChildNode(FulltextIndexConstants.ANL_CHAR_FILTERS),
+                            CharFilterFactory::lookupClass, CharFilterDefinition::new
+                    );
+                    charFilters.forEach((key, value) -> builder.charFilter(key, fn -> fn.definition(value)));
+
+                    builder.analyzer(analyzerName, bf -> bf.custom(CustomAnalyzer.of(cab ->
+                            cab.tokenizer("custom_tokenizer")
+                                    .filter(new ArrayList<>(tokenFilters.keySet()))
+                                    .charFilter(new ArrayList<>(charFilters.keySet()))

Review Comment:
   ```suggestion
                                       .filter(List.copyOf(tokenFilters.keySet()))
                                       .charFilter(List.copyOf(charFilters.keySet()))
   ```
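   (`List.copyOf` also returns an unmodifiable list, which better matches the read-only use of these keys.)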



##########
oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticCustomAnalyzer.java:
##########
@@ -0,0 +1,254 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.plugins.index.elastic.index;
+
+import co.elastic.clients.elasticsearch._types.analysis.Analyzer;
+import co.elastic.clients.elasticsearch._types.analysis.CharFilterDefinition;
+import co.elastic.clients.elasticsearch._types.analysis.CustomAnalyzer;
+import co.elastic.clients.elasticsearch._types.analysis.TokenFilterDefinition;
+import co.elastic.clients.elasticsearch._types.analysis.TokenizerDefinition;
+import co.elastic.clients.elasticsearch.indices.IndexSettingsAnalysis;
+import co.elastic.clients.json.JsonData;
+import com.google.common.base.CaseFormat;
+import org.apache.commons.io.IOUtils;
+import org.apache.jackrabbit.JcrConstants;
+import org.apache.jackrabbit.oak.api.Blob;
+import org.apache.jackrabbit.oak.api.Tree;
+import org.apache.jackrabbit.oak.api.Type;
+import org.apache.jackrabbit.oak.plugins.index.search.FulltextIndexConstants;
+import org.apache.jackrabbit.oak.plugins.index.search.util.ConfigUtil;
+import org.apache.jackrabbit.oak.plugins.tree.factories.TreeFactory;
+import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry;
+import org.apache.jackrabbit.oak.spi.state.NodeState;
+import org.apache.jackrabbit.oak.spi.state.NodeStateUtils;
+import org.apache.lucene.analysis.charfilter.MappingCharFilterFactory;
+import org.apache.lucene.analysis.en.AbstractWordsFileFilterFactory;
+import org.apache.lucene.analysis.util.AbstractAnalysisFactory;
+import org.apache.lucene.analysis.util.CharFilterFactory;
+import org.apache.lucene.analysis.util.TokenFilterFactory;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.Reader;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.Spliterator;
+import java.util.Spliterators;
+import java.util.function.BiFunction;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.StreamSupport;
+
+/**
+ * Loads custom analysis index settings from a JCR NodeState. It also takes care of required transformations from lucene
+ * to elasticsearch configuration options.

Review Comment:
   ```suggestion
    * to Elasticsearch configuration options.
   ```



##########
oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticCustomAnalyzer.java:
##########
@@ -0,0 +1,254 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.plugins.index.elastic.index;
+
+import co.elastic.clients.elasticsearch._types.analysis.Analyzer;
+import co.elastic.clients.elasticsearch._types.analysis.CharFilterDefinition;
+import co.elastic.clients.elasticsearch._types.analysis.CustomAnalyzer;
+import co.elastic.clients.elasticsearch._types.analysis.TokenFilterDefinition;
+import co.elastic.clients.elasticsearch._types.analysis.TokenizerDefinition;
+import co.elastic.clients.elasticsearch.indices.IndexSettingsAnalysis;
+import co.elastic.clients.json.JsonData;
+import com.google.common.base.CaseFormat;
+import org.apache.commons.io.IOUtils;
+import org.apache.jackrabbit.JcrConstants;
+import org.apache.jackrabbit.oak.api.Blob;
+import org.apache.jackrabbit.oak.api.Tree;
+import org.apache.jackrabbit.oak.api.Type;
+import org.apache.jackrabbit.oak.plugins.index.search.FulltextIndexConstants;
+import org.apache.jackrabbit.oak.plugins.index.search.util.ConfigUtil;
+import org.apache.jackrabbit.oak.plugins.tree.factories.TreeFactory;
+import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry;
+import org.apache.jackrabbit.oak.spi.state.NodeState;
+import org.apache.jackrabbit.oak.spi.state.NodeStateUtils;
+import org.apache.lucene.analysis.charfilter.MappingCharFilterFactory;
+import org.apache.lucene.analysis.en.AbstractWordsFileFilterFactory;
+import org.apache.lucene.analysis.util.AbstractAnalysisFactory;
+import org.apache.lucene.analysis.util.CharFilterFactory;
+import org.apache.lucene.analysis.util.TokenFilterFactory;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.Reader;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.Spliterator;
+import java.util.Spliterators;
+import java.util.function.BiFunction;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.StreamSupport;
+
+/**
+ * Loads custom analysis index settings from a JCR NodeState. It also takes care of required transformations from lucene
+ * to elasticsearch configuration options.
+ */
+public class ElasticCustomAnalyzer {
+
+    private static final Logger LOG = LoggerFactory.getLogger(ElasticCustomAnalyzer.class);
+
+    private static final String ANALYZER_TYPE = "type";
+
+    private static final Set<String> IGNORE_PROP_NAMES = new HashSet<>(Arrays.asList(
+            AbstractAnalysisFactory.LUCENE_MATCH_VERSION_PARAM,
+            FulltextIndexConstants.ANL_CLASS,
+            FulltextIndexConstants.ANL_NAME,
+            JcrConstants.JCR_PRIMARYTYPE)
+    );
+
+    /*
+     * Mappings for lucene options not available anymore to supported elastic counterparts
+     */
+    private static final Map<Class<? extends AbstractAnalysisFactory>, Map<String, String>> CONFIGURATION_MAPPING;
+
+    static {
+        CONFIGURATION_MAPPING = new LinkedHashMap<>();
+        CONFIGURATION_MAPPING.put(AbstractWordsFileFilterFactory.class, Collections.singletonMap("words", "stopwords"));
+        CONFIGURATION_MAPPING.put(MappingCharFilterFactory.class, Collections.singletonMap("mapping", "mappings"));
+    }
+
+    @Nullable
+    public static IndexSettingsAnalysis.Builder buildCustomAnalyzers(NodeState state, String analyzerName) {
+        if (state != null) {
+            NodeState defaultAnalyzer = state.getChildNode(FulltextIndexConstants.ANL_DEFAULT);
+            if (defaultAnalyzer.exists()) {
+                IndexSettingsAnalysis.Builder builder = new IndexSettingsAnalysis.Builder();
+                Map<String, Object> analyzer = convertNodeState(defaultAnalyzer);
+                String builtIn = defaultAnalyzer.getString(FulltextIndexConstants.ANL_CLASS);
+                if (builtIn == null) {
+                    builtIn = defaultAnalyzer.getString(FulltextIndexConstants.ANL_NAME);
+                }
+                if (builtIn != null) {
+                    analyzer.put(ANALYZER_TYPE, normalize(builtIn));
+
+                    // content params, usually stop words
+                    for (ChildNodeEntry nodeEntry : defaultAnalyzer.getChildNodeEntries()) {
+                        try {
+                            analyzer.put(normalize(nodeEntry.getName()), loadContent(nodeEntry.getNodeState(), nodeEntry.getName()));
+                        } catch (IOException e) {
+                            throw new IllegalStateException("Unable to load content for node entry " + nodeEntry.getName(), e);
+                        }
+                    }
+
+                    builder.analyzer(analyzerName, new Analyzer(null, JsonData.of(analyzer)));
+                } else { // try to compose the analyzer
+                    builder.tokenizer("custom_tokenizer", tb ->
+                            tb.definition(loadTokenizer(defaultAnalyzer.getChildNode(FulltextIndexConstants.ANL_TOKENIZER))));
+
+                    LinkedHashMap<String, TokenFilterDefinition> tokenFilters = loadFilters(
+                            defaultAnalyzer.getChildNode(FulltextIndexConstants.ANL_FILTERS),
+                            TokenFilterFactory::lookupClass, TokenFilterDefinition::new
+                    );
+                    tokenFilters.forEach((key, value) -> builder.filter(key, fn -> fn.definition(value)));
+
+                    LinkedHashMap<String, CharFilterDefinition> charFilters = loadFilters(
+                            defaultAnalyzer.getChildNode(FulltextIndexConstants.ANL_CHAR_FILTERS),
+                            CharFilterFactory::lookupClass, CharFilterDefinition::new
+                    );
+                    charFilters.forEach((key, value) -> builder.charFilter(key, fn -> fn.definition(value)));
+
+                    builder.analyzer(analyzerName, bf -> bf.custom(CustomAnalyzer.of(cab ->
+                            cab.tokenizer("custom_tokenizer")
+                                    .filter(new ArrayList<>(tokenFilters.keySet()))
+                                    .charFilter(new ArrayList<>(charFilters.keySet()))
+                    )));
+                }
+                return builder;
+            }
+        }
+        return null;
+    }
+
+    @NotNull
+    private static TokenizerDefinition loadTokenizer(NodeState state) {
+        String name = normalize(Objects.requireNonNull(state.getString(FulltextIndexConstants.ANL_NAME)));
+        Map<String, Object> args = convertNodeState(state);
+        args.put(ANALYZER_TYPE, name);
+        return new TokenizerDefinition(name, JsonData.of(args));
+    }
+
+    private static <FD> LinkedHashMap<String, FD> loadFilters(NodeState state,
+                                                              Function<String, Class<? extends AbstractAnalysisFactory>> lookup,
+                                                              BiFunction<String, JsonData, FD> factory) {
+        LinkedHashMap<String, FD> filters = new LinkedHashMap<>();
+        int i = 0;
+        Tree tree = TreeFactory.createReadOnlyTree(state);
+        for (Tree t : tree.getChildren()) {
+            NodeState child = state.getChildNode(t.getName());
+            Class<? extends AbstractAnalysisFactory> tff = lookup.apply(t.getName());
+            String name;
+            try {
+                name = normalize((String) tff.getField("NAME").get(null));
+            } catch (Exception e) {
+                LOG.warn("unable to get the filter name using reflection. Try using the normalized node name", e);
+                name = normalize(t.getName());
+            }
+            Optional<Map<String, String>> mappingOpt =
+                    CONFIGURATION_MAPPING.entrySet().stream().filter(k -> k.getKey().isAssignableFrom(tff)).map(Map.Entry::getValue).findFirst();
+            Map<String, Object> args = convertNodeState(child, mappingOpt.orElseGet(Collections::emptyMap));
+
+            args.put(ANALYZER_TYPE, name);
+
+            filters.put(name + "_" + i++, factory.apply(name, JsonData.of(args)));
+        }
+        return filters;
+    }
+
+    private static List<String> loadContent(NodeState file, String name) throws IOException {
+        List<String> result = new ArrayList<>();
+        Blob blob = ConfigUtil.getBlob(file, name);
+        Reader content = null;
+        try {
+            content = new InputStreamReader(Objects.requireNonNull(blob).getNewStream(), StandardCharsets.UTF_8);
+            BufferedReader br = null;
+            try {
+                br = new BufferedReader(content);
+                String word;
+                while ((word = br.readLine()) != null) {
+                    result.add(word.trim());
+                }
+            } finally {
+                IOUtils.close(br);
+            }
+            return result;
+        } finally {
+            IOUtils.close(content);
+        }
+    }
+
+    /**
+     * Normalizes one of the following values:
+     * - lucene class (eg: org.apache.lucene.analysis.en.EnglishAnalyzer -> english)
+     * - lucene name (eg: Standard -> standard)
+     * into the elasticsearch compatible value
+     */
+    private static String normalize(String value) {
+        // this might be a full class, let's tokenize the value
+        String[] anlClassTokens = value.split("\\.");
+        // and take the last part
+        String name = anlClassTokens[anlClassTokens.length - 1];

Review Comment:
   Check that `anlClassTokens` is not empty and raise an exception with a clear message, to avoid an `ArrayIndexOutOfBoundsException` in case of bad input to this method.
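   A possible guard (sketch; the message wording is just an example):
   ```java
   if (anlClassTokens.length == 0) {
       throw new IllegalArgumentException("Unable to extract an analyzer name from value: " + value);
   }
   ```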



##########
oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticCustomAnalyzer.java:
##########
@@ -0,0 +1,254 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.plugins.index.elastic.index;
+
+import co.elastic.clients.elasticsearch._types.analysis.Analyzer;
+import co.elastic.clients.elasticsearch._types.analysis.CharFilterDefinition;
+import co.elastic.clients.elasticsearch._types.analysis.CustomAnalyzer;
+import co.elastic.clients.elasticsearch._types.analysis.TokenFilterDefinition;
+import co.elastic.clients.elasticsearch._types.analysis.TokenizerDefinition;
+import co.elastic.clients.elasticsearch.indices.IndexSettingsAnalysis;
+import co.elastic.clients.json.JsonData;
+import com.google.common.base.CaseFormat;
+import org.apache.commons.io.IOUtils;
+import org.apache.jackrabbit.JcrConstants;
+import org.apache.jackrabbit.oak.api.Blob;
+import org.apache.jackrabbit.oak.api.Tree;
+import org.apache.jackrabbit.oak.api.Type;
+import org.apache.jackrabbit.oak.plugins.index.search.FulltextIndexConstants;
+import org.apache.jackrabbit.oak.plugins.index.search.util.ConfigUtil;
+import org.apache.jackrabbit.oak.plugins.tree.factories.TreeFactory;
+import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry;
+import org.apache.jackrabbit.oak.spi.state.NodeState;
+import org.apache.jackrabbit.oak.spi.state.NodeStateUtils;
+import org.apache.lucene.analysis.charfilter.MappingCharFilterFactory;
+import org.apache.lucene.analysis.en.AbstractWordsFileFilterFactory;
+import org.apache.lucene.analysis.util.AbstractAnalysisFactory;
+import org.apache.lucene.analysis.util.CharFilterFactory;
+import org.apache.lucene.analysis.util.TokenFilterFactory;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.Reader;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.Spliterator;
+import java.util.Spliterators;
+import java.util.function.BiFunction;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.StreamSupport;
+
+/**
+ * Loads custom analysis index settings from a JCR NodeState. It also takes care of required transformations from lucene
+ * to elasticsearch configuration options.
+ */
+public class ElasticCustomAnalyzer {
+
+    private static final Logger LOG = LoggerFactory.getLogger(ElasticCustomAnalyzer.class);
+
+    private static final String ANALYZER_TYPE = "type";
+
+    private static final Set<String> IGNORE_PROP_NAMES = new HashSet<>(Arrays.asList(
+            AbstractAnalysisFactory.LUCENE_MATCH_VERSION_PARAM,
+            FulltextIndexConstants.ANL_CLASS,
+            FulltextIndexConstants.ANL_NAME,
+            JcrConstants.JCR_PRIMARYTYPE)
+    );
+
+    /*
+     * Mappings for lucene options not available anymore to supported elastic counterparts
+     */
+    private static final Map<Class<? extends AbstractAnalysisFactory>, Map<String, String>> CONFIGURATION_MAPPING;
+
+    static {
+        CONFIGURATION_MAPPING = new LinkedHashMap<>();
+        CONFIGURATION_MAPPING.put(AbstractWordsFileFilterFactory.class, Collections.singletonMap("words", "stopwords"));
+        CONFIGURATION_MAPPING.put(MappingCharFilterFactory.class, Collections.singletonMap("mapping", "mappings"));
+    }
+
+    @Nullable
+    public static IndexSettingsAnalysis.Builder buildCustomAnalyzers(NodeState state, String analyzerName) {
+        if (state != null) {
+            NodeState defaultAnalyzer = state.getChildNode(FulltextIndexConstants.ANL_DEFAULT);
+            if (defaultAnalyzer.exists()) {
+                IndexSettingsAnalysis.Builder builder = new IndexSettingsAnalysis.Builder();
+                Map<String, Object> analyzer = convertNodeState(defaultAnalyzer);
+                String builtIn = defaultAnalyzer.getString(FulltextIndexConstants.ANL_CLASS);
+                if (builtIn == null) {
+                    builtIn = defaultAnalyzer.getString(FulltextIndexConstants.ANL_NAME);
+                }
+                if (builtIn != null) {
+                    analyzer.put(ANALYZER_TYPE, normalize(builtIn));
+
+                    // content params, usually stop words
+                    for (ChildNodeEntry nodeEntry : defaultAnalyzer.getChildNodeEntries()) {
+                        try {
+                            analyzer.put(normalize(nodeEntry.getName()), loadContent(nodeEntry.getNodeState(), nodeEntry.getName()));
+                        } catch (IOException e) {
+                            throw new IllegalStateException("Unable to load content for node entry " + nodeEntry.getName(), e);
+                        }
+                    }
+
+                    builder.analyzer(analyzerName, new Analyzer(null, JsonData.of(analyzer)));
+                } else { // try to compose the analyzer
+                    builder.tokenizer("custom_tokenizer", tb ->
+                            tb.definition(loadTokenizer(defaultAnalyzer.getChildNode(FulltextIndexConstants.ANL_TOKENIZER))));
+
+                    LinkedHashMap<String, TokenFilterDefinition> tokenFilters = loadFilters(
+                            defaultAnalyzer.getChildNode(FulltextIndexConstants.ANL_FILTERS),
+                            TokenFilterFactory::lookupClass, TokenFilterDefinition::new
+                    );
+                    tokenFilters.forEach((key, value) -> builder.filter(key, fn -> fn.definition(value)));
+
+                    LinkedHashMap<String, CharFilterDefinition> charFilters = loadFilters(
+                            defaultAnalyzer.getChildNode(FulltextIndexConstants.ANL_CHAR_FILTERS),
+                            CharFilterFactory::lookupClass, CharFilterDefinition::new
+                    );
+                    charFilters.forEach((key, value) -> builder.charFilter(key, fn -> fn.definition(value)));
+
+                    builder.analyzer(analyzerName, bf -> bf.custom(CustomAnalyzer.of(cab ->
+                            cab.tokenizer("custom_tokenizer")
+                                    .filter(new ArrayList<>(tokenFilters.keySet()))
+                                    .charFilter(new ArrayList<>(charFilters.keySet()))
+                    )));
+                }
+                return builder;
+            }
+        }
+        return null;
+    }
+
+    @NotNull
+    private static TokenizerDefinition loadTokenizer(NodeState state) {
+        String name = normalize(Objects.requireNonNull(state.getString(FulltextIndexConstants.ANL_NAME)));
+        Map<String, Object> args = convertNodeState(state);
+        args.put(ANALYZER_TYPE, name);
+        return new TokenizerDefinition(name, JsonData.of(args));
+    }
+
+    private static <FD> LinkedHashMap<String, FD> loadFilters(NodeState state,
+                                                              Function<String, Class<? extends AbstractAnalysisFactory>> lookup,
+                                                              BiFunction<String, JsonData, FD> factory) {
+        LinkedHashMap<String, FD> filters = new LinkedHashMap<>();
+        int i = 0;
+        Tree tree = TreeFactory.createReadOnlyTree(state);
+        for (Tree t : tree.getChildren()) {
+            NodeState child = state.getChildNode(t.getName());
+            Class<? extends AbstractAnalysisFactory> tff = lookup.apply(t.getName());
+            String name;
+            try {
+                name = normalize((String) tff.getField("NAME").get(null));
+            } catch (Exception e) {
+                LOG.warn("unable to get the filter name using reflection. Try using the normalized node name", e);
+                name = normalize(t.getName());
+            }
+            Optional<Map<String, String>> mappingOpt =
+                    CONFIGURATION_MAPPING.entrySet().stream().filter(k -> k.getKey().isAssignableFrom(tff)).map(Map.Entry::getValue).findFirst();
+            Map<String, Object> args = convertNodeState(child, mappingOpt.orElseGet(Collections::emptyMap));
+
+            args.put(ANALYZER_TYPE, name);
+
+            filters.put(name + "_" + i++, factory.apply(name, JsonData.of(args)));
+        }
+        return filters;
+    }
+
+    private static List<String> loadContent(NodeState file, String name) throws IOException {
+        List<String> result = new ArrayList<>();
+        Blob blob = ConfigUtil.getBlob(file, name);
+        Reader content = null;
+        try {
+            content = new InputStreamReader(Objects.requireNonNull(blob).getNewStream(), StandardCharsets.UTF_8);
+            BufferedReader br = null;
+            try {
+                br = new BufferedReader(content);
+                String word;
+                while ((word = br.readLine()) != null) {
+                    result.add(word.trim());
+                }
+            } finally {
+                IOUtils.close(br);
+            }
+            return result;
+        } finally {
+            IOUtils.close(content);
+        }
+    }
+
+    /**
+     * Normalizes one of the following values:
+     * - lucene class (eg: org.apache.lucene.analysis.en.EnglishAnalyzer -> english)
+     * - lucene name (eg: Standard -> standard)
+     * into the elasticsearch compatible value
+     */
+    private static String normalize(String value) {
+        // this might be a full class, let's tokenize the value
+        String[] anlClassTokens = value.split("\\.");
+        // and take the last part
+        String name = anlClassTokens[anlClassTokens.length - 1];
+        // all options in elastic are in snake case
+        name = CaseFormat.UPPER_CAMEL.to(CaseFormat.LOWER_UNDERSCORE, name);
+        // if it ends with analyzer we need to get rid of it
+        if (name.endsWith("_analyzer")) {
+            name = name.replace("_analyzer", "");
+        }
+        return name;
+    }
+
+    private static Map<String, Object> convertNodeState(NodeState state) {
+        return convertNodeState(state, Collections.emptyMap());
+    }
+
+    private static Map<String, Object> convertNodeState(NodeState state, Map<String, String> mapping) {
+        return StreamSupport.stream(Spliterators.spliteratorUnknownSize(state.getProperties().iterator(), Spliterator.ORDERED), false)
+                .filter(ps -> ps.getType() != Type.BINARY)
+                .filter(ps -> !ps.isArray())
+                .filter(ps -> !NodeStateUtils.isHidden(ps.getName()))
+                .filter(ps -> !IGNORE_PROP_NAMES.contains(ps.getName()))

Review Comment:
   Nitpick: this could be transformed into a single filter with a series of `&&` conditions. It would generate less bytecode and be more efficient. In this case it's not too important, but in general a single combined predicate is just as legible as multiple chained filters, so there is not much to lose by grouping them.
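   For illustration, a minimal sketch of the suggested grouping (assuming the imports and constants from the diff above, plus `java.util.stream.Stream` and Oak's `PropertyState`; the terminal operation of the pipeline falls outside this hunk and is elided):
   
   ```java
   // Sketch only: the four chained filters collapsed into a single predicate.
   Stream<? extends PropertyState> props = StreamSupport.stream(
           Spliterators.spliteratorUnknownSize(state.getProperties().iterator(), Spliterator.ORDERED), false)
           .filter(ps -> ps.getType() != Type.BINARY
                   && !ps.isArray()
                   && !NodeStateUtils.isHidden(ps.getName())
                   && !IGNORE_PROP_NAMES.contains(ps.getName()));
   // the mapping/collect step from the PR would follow here unchanged
   ```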





[GitHub] [jackrabbit-oak] fabriziofortino commented on a diff in pull request #860: OAK-10111: support for custom analyzers in elastic

Posted by "fabriziofortino (via GitHub)" <gi...@apache.org>.
fabriziofortino commented on code in PR #860:
URL: https://github.com/apache/jackrabbit-oak/pull/860#discussion_r1127635757


##########
oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticCustomAnalyzer.java:
##########
@@ -0,0 +1,254 @@
+    /**
+     * Normalizes one of the following values:
+     * - lucene class (eg: org.apache.lucene.analysis.en.EnglishAnalyzer -> english)
+     * - lucene name (eg: Standard -> standard)
+     * into the elasticsearch compatible value
+     */
+    private static String normalize(String value) {
+        // this might be a full class, let's tokenize the value
+        String[] anlClassTokens = value.split("\\.");
+        // and take the last part
+        String name = anlClassTokens[anlClassTokens.length - 1];

Review Comment:
   Done, even though it should not be possible to get an empty array back here.
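   For reference, a minimal sketch of the guard under discussion (assumed shape, not necessarily the exact merged code; `split("\\.")` only returns an empty array for degenerate inputs made entirely of dots, such as "."):
   
   ```java
   private static String normalize(String value) {
       // this might be a full class name, so tokenize on '.'
       String[] anlClassTokens = value.split("\\.");
       // defensive guard: fail fast instead of an ArrayIndexOutOfBoundsException
       if (anlClassTokens.length == 0) {
           throw new IllegalArgumentException("Cannot extract a name from " + value);
       }
       // take the last token and convert it to elastic's snake_case convention
       String name = anlClassTokens[anlClassTokens.length - 1];
       name = CaseFormat.UPPER_CAMEL.to(CaseFormat.LOWER_UNDERSCORE, name);
       // built-in analyzers are referenced without the "analyzer" suffix in elastic
       if (name.endsWith("_analyzer")) {
           name = name.replace("_analyzer", "");
       }
       return name;
   }
   ```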





[GitHub] [jackrabbit-oak] fabriziofortino commented on a diff in pull request #860: OAK-10111: support for custom analyzers in elastic

Posted by "fabriziofortino (via GitHub)" <gi...@apache.org>.
fabriziofortino commented on code in PR #860:
URL: https://github.com/apache/jackrabbit-oak/pull/860#discussion_r1127639374


##########
oak-search-elastic/src/main/java/org/apache/jackrabbit/oak/plugins/index/elastic/index/ElasticCustomAnalyzer.java:
##########
@@ -0,0 +1,254 @@
+                } else { // try to compose the analyzer
+                    builder.tokenizer("custom_tokenizer", tb ->
+                            tb.definition(loadTokenizer(defaultAnalyzer.getChildNode(FulltextIndexConstants.ANL_TOKENIZER))));
+
+                    LinkedHashMap<String, TokenFilterDefinition> tokenFilters = loadFilters(
+                            defaultAnalyzer.getChildNode(FulltextIndexConstants.ANL_FILTERS),
+                            TokenFilterFactory::lookupClass, TokenFilterDefinition::new
+                    );
+                    tokenFilters.forEach((key, value) -> builder.filter(key, fn -> fn.definition(value)));
+
+                    LinkedHashMap<String, CharFilterDefinition> charFilters = loadFilters(
+                            defaultAnalyzer.getChildNode(FulltextIndexConstants.ANL_CHAR_FILTERS),
+                            CharFilterFactory::lookupClass, CharFilterDefinition::new
+                    );
+                    charFilters.forEach((key, value) -> builder.charFilter(key, fn -> fn.definition(value)));
+
+                    builder.analyzer(analyzerName, bf -> bf.custom(CustomAnalyzer.of(cab ->
+                            cab.tokenizer("custom_tokenizer")
+                                    .filter(new ArrayList<>(tokenFilters.keySet()))
+                                    .charFilter(new ArrayList<>(charFilters.keySet()))

Review Comment:
   `List.copyOf()` was introduced in Java 10. Is it okay to include it? There is currently no use of it in the repo.
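   For context, a self-contained sketch contrasting the two variants (the filter names are illustrative, following the `name + "_" + i` scheme from `loadFilters`):
   
   ```java
   import java.util.ArrayList;
   import java.util.Arrays;
   import java.util.LinkedHashSet;
   import java.util.List;
   import java.util.Set;
   
   public class CopyOfDemo {
       public static void main(String[] args) {
           // keys in insertion order, as produced by the LinkedHashMap in loadFilters
           Set<String> keys = new LinkedHashSet<>(Arrays.asList("lowercase_0", "stop_1"));
   
           // Java 8 compatible: independent, mutable copy of the key set
           List<String> mutableCopy = new ArrayList<>(keys);
   
           // Java 10+: unmodifiable copy; mutating it throws UnsupportedOperationException
           List<String> immutableCopy = List.copyOf(keys);
   
           System.out.println(mutableCopy + " / " + immutableCopy);
       }
   }
   ```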


