Posted to commits@ambari.apache.org by ol...@apache.org on 2018/08/16 09:58:11 UTC

[ambari] branch trunk updated: AMBARI-24480. Upgrade Infra Solr (from Solr 7.3.1 to Solr 7.4.0 (#2088)

This is an automated email from the ASF dual-hosted git repository.

oleewere pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 49e8ee3  AMBARI-24480. Upgrade Infra Solr (from Solr 7.3.1 to Solr 7.4.0 (#2088)
49e8ee3 is described below

commit 49e8ee3b82a4133aa5e3b4647c423200c8df1b20
Author: Olivér Szabó <ol...@gmail.com>
AuthorDate: Thu Aug 16 11:58:07 2018 +0200

    AMBARI-24480. Upgrade Infra Solr (from Solr 7.3.1 to Solr 7.4.0 (#2088)
---
 .../ambari-infra-manager/docker/docker-compose.yml |  2 +-
 .../docker/infra-manager-docker-compose.sh         |  2 +-
 ambari-infra/ambari-infra-solr-client/README.md    |  8 +-
 .../src/main/python/migrationHelper.py             |  2 +-
 .../src/main/resources/solrIndexHelper.sh          |  8 +-
 .../docker/infra-solr-docker-compose.sh            |  2 +-
 ambari-infra/pom.xml                               |  2 +-
 .../main/configsets/audit_logs/conf/solrconfig.xml |  2 +-
 .../configsets/hadoop_logs/conf/solrconfig.xml     |  2 +-
 .../main/configsets/history/conf/solrconfig.xml    |  2 +-
 ambari-logsearch/docker/Dockerfile                 |  2 +-
 ambari-logsearch/docker/docker-compose.yml         |  2 +-
 ambari-logsearch/docker/logsearch-docker.sh        |  2 +-
 ambari-logsearch/docker/solr.yml                   |  2 +-
 ambari-logsearch/docker/sso.yml                    |  2 +-
 ambari-logsearch/pom.xml                           |  2 +-
 .../ambari/server/upgrade/UpgradeCatalog271.java   | 99 ++++++++++++++++++++++
 .../server/upgrade/UpgradeCatalog271Test.java      | 64 ++++++++++++++
 18 files changed, 185 insertions(+), 22 deletions(-)
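
The bulk of the patch is a mechanical bump of the Solr/Lucene version string from 7.3.1 to 7.4.0 across the Docker setups, Maven poms, Log Search configsets and the migration tooling; the only new logic is the UpgradeCatalog271 step (plus its test) that rewrites the configs of already-deployed clusters. A quick way to confirm no hard-coded reference was missed (an illustrative check, not part of the patch):

```bash
# Illustrative sanity check: list any remaining hard-coded 7.3.1 references
# in the modules touched by this patch.
git grep -n "7\.3\.1" -- ambari-infra ambari-logsearch ambari-server
```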

diff --git a/ambari-infra/ambari-infra-manager/docker/docker-compose.yml b/ambari-infra/ambari-infra-manager/docker/docker-compose.yml
index d77205f..2369d85 100644
--- a/ambari-infra/ambari-infra-manager/docker/docker-compose.yml
+++ b/ambari-infra/ambari-infra-manager/docker/docker-compose.yml
@@ -27,7 +27,7 @@ services:
       ZOO_SERVERS: server.1=zookeeper:2888:3888
   solr:
 #  TODO: use infra-solr
-    image: solr:${SOLR_VERSION:-7.3.1}
+    image: solr:${SOLR_VERSION:-7.4.0}
     restart: always
     hostname: solr
     ports:
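
The compose file only changes the default: `${SOLR_VERSION:-7.4.0}` makes docker-compose fall back to the official `solr:7.4.0` image when SOLR_VERSION is not set; the docker helper scripts below pin the same SOLR_VERSION=7.4.0 in the environment they generate. A hedged sketch of pulling the new default tag and of pinning it explicitly for a single run, assuming it is run from the directory holding this docker-compose.yml:

```bash
# Pre-pull the new default image tag
docker pull solr:7.4.0

# Pin the version explicitly for one run; the :-7.4.0 fallback only applies
# when SOLR_VERSION is unset or empty.
SOLR_VERSION=7.4.0 docker-compose up -d solr
```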
diff --git a/ambari-infra/ambari-infra-manager/docker/infra-manager-docker-compose.sh b/ambari-infra/ambari-infra-manager/docker/infra-manager-docker-compose.sh
index c36d671..5271909 100755
--- a/ambari-infra/ambari-infra-manager/docker/infra-manager-docker-compose.sh
+++ b/ambari-infra/ambari-infra-manager/docker/infra-manager-docker-compose.sh
@@ -72,7 +72,7 @@ AMBARI_LOCATION=$AMBARI_LOCATION
 ZOOKEEPER_VERSION=3.4.10
 ZOOKEEPER_CONNECTION_STRING=zookeeper:2181
 
-SOLR_VERSION=7.3.1
+SOLR_VERSION=7.4.0
 
 HADOOP_VERSION=3.0.0
 EOF
diff --git a/ambari-infra/ambari-infra-solr-client/README.md b/ambari-infra/ambari-infra-solr-client/README.md
index a14f92a..2b6d004 100644
--- a/ambari-infra/ambari-infra-solr-client/README.md
+++ b/ambari-infra/ambari-infra-solr-client/README.md
@@ -472,7 +472,7 @@ If the script finished successfully and everything looks green on Ambari UI as w
 Migration for `ranger_audits` collection (cores):
 
 ```bash
-# by default, you will migrate to Lucene 6.6.2; if you want to migrate again to Solr 7 (not required), you can use the --version 7.3.1 flag
+# by default, you will migrate to Lucene 6.6.2; if you want to migrate again to Solr 7 (not required), you can use the --version 7.4.0 flag
 /usr/lib/ambari-infra-solr-client/migrationHelper.py --ini-file $CONFIG_INI_LOCATION --action migrate -s RANGER
 ```
 
@@ -487,7 +487,7 @@ infra-lucene-index-tool upgrade-index -d /tmp/ranger-backup -f -b -g
 # with 'infra-lucene-index-tool help' command you can checkout the command line options
 ```
 
-By default, the tool will migrate from Lucene version 5 to Lucene version 6.6.2 (which is fine for Solr 7). If you want a Lucene 7 index, you will need to re-run the migration tool command with the `-v 7.3.1` option.
+By default, the tool will migrate from Lucene version 5 to Lucene version 6.6.2 (which is fine for Solr 7). If you want a Lucene 7 index, you will need to re-run the migration tool command with the `-v 7.4.0` option.
 
 #### <a id="v/2.-migrate-atlas-collections">VI/2. Migrate Atlas collections</a>
 
@@ -509,7 +509,7 @@ infra-lucene-index-tool upgrade-index -d /tmp/fulltext_index_backup -f -b -g
 # with 'infra-lucene-index-tool help' command you can checkout the command line options
 ```
 
-By default, the tool will migrate from Lucene version 5 to Lucene version 6.6.2 (which is fine for Solr 7). If you want a Lucene 7 index, you will need to re-run the migration tool command with the `-v 7.3.1` option.
+By default, the tool will migrate from Lucene version 5 to Lucene version 6.6.2 (which is fine for Solr 7). If you want a Lucene 7 index, you will need to re-run the migration tool command with the `-v 7.4.0` option.
 
 ### <a id="vi.-restore-collections">VII. Restore Collections</a>
 
@@ -852,7 +852,7 @@ Options:
                         location of the index backups (for ranger). required
                         only if no backup path in the ini file
   --version=INDEX_VERSION
-                        lucene index version for migration (6.6.2 or 7.3.1)
+                        lucene index version for migration (6.6.2 or 7.4.0)
   --solr-async-request-tries=SOLR_ASYNC_REQUEST_TRIES
                         number of max tries for async Solr requests (e.g.:
                         delete operation)
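
As the README changes above spell out, migrationHelper.py still defaults to a Lucene 6.6.2 target index and the 7.4.0 value is only needed for an optional second pass; the same applies to the low-level index tool. A sketch combining the commands and flags quoted above with the new version value:

```bash
# Optional second migration pass to a Lucene 7 index (the default target stays 6.6.2)
/usr/lib/ambari-infra-solr-client/migrationHelper.py \
  --ini-file $CONFIG_INI_LOCATION --action migrate -s RANGER --version 7.4.0

# Equivalent low-level run against a backup directory, per the README section above
infra-lucene-index-tool upgrade-index -d /tmp/ranger-backup -f -b -g -v 7.4.0
```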
diff --git a/ambari-infra/ambari-infra-solr-client/src/main/python/migrationHelper.py b/ambari-infra/ambari-infra-solr-client/src/main/python/migrationHelper.py
index 8cb103d..b2e835b 100755
--- a/ambari-infra/ambari-infra-solr-client/src/main/python/migrationHelper.py
+++ b/ambari-infra/ambari-infra-solr-client/src/main/python/migrationHelper.py
@@ -1911,7 +1911,7 @@ if __name__=="__main__":
   parser.add_option("--atlas-index-location", dest="atlas_index_location", type="string", help="location of the index backups (for atlas). required only if no backup path in the ini file")
   parser.add_option("--ranger-index-location", dest="ranger_index_location", type="string", help="location of the index backups (for ranger). required only if no backup path in the ini file")
 
-  parser.add_option("--version", dest="index_version", type="string", default="6.6.2", help="lucene index version for migration (6.6.2 or 7.3.1)")
+  parser.add_option("--version", dest="index_version", type="string", default="6.6.2", help="lucene index version for migration (6.6.2 or 7.4.0)")
   parser.add_option("--solr-async-request-tries", dest="solr_async_request_tries", type="int", default=400,  help="number of max tries for async Solr requests (e.g.: delete operation)")
   parser.add_option("--request-tries", dest="request_tries", type="int", help="number of tries for BACKUP/RESTORE status api calls in the request")
   parser.add_option("--request-time-interval", dest="request_time_interval", type="int", help="time interval between BACKUP/RESTORE status api calls in the request")
diff --git a/ambari-infra/ambari-infra-solr-client/src/main/resources/solrIndexHelper.sh b/ambari-infra/ambari-infra-solr-client/src/main/resources/solrIndexHelper.sh
index dfa96aa..5cd5b5f 100755
--- a/ambari-infra/ambari-infra-solr-client/src/main/resources/solrIndexHelper.sh
+++ b/ambari-infra/ambari-infra-solr-client/src/main/resources/solrIndexHelper.sh
@@ -43,7 +43,7 @@ function print_help() {
      -b, --backup-enabled                    Use indexer tool with backup snapshots. (core filter won't be used)
      -g, --debug                             Enable debug mode, IndexUpgrader output will be verbose.
     -f, --force                             Force the index upgrade to start, even if the version is already at least 6.
-     -v, --version                           Lucene version to upgrade (default: 6.6.2, available: 6.6.2, 7.3.1)
+     -v, --version                           Lucene version to upgrade (default: 6.6.2, available: 6.6.2, 7.4.0)
 EOF
 }
 
@@ -51,7 +51,7 @@ function upgrade_core() {
   local INDEX_DIR=${1:?"usage: <index_base_dir> e.g.: /opt/ambari_infra_solr/data"}
   local FORCE_UPDATE=${2:?"usage <force_update_flag> e.g.: true"}
   local SOLR_CORE_FILTERS=${3:?"usage: <comma separated core filters> e.g.: hadoop_logs,audit_logs,history"}
-  local LUCENE_VERSION=${4:?"usage <lucene_index_version> e.g.: 7.3.1"}
+  local LUCENE_VERSION=${4:?"usage <lucene_index_version> e.g.: 7.4.0"}
   local BACKUP_MODE=${5:?"usage <backup_mode_enabled> e.g.: true"}
   local DEBUG_MODE=${6:?"usage <debug_mode> e.g.: true"}
   SOLR_CORE_FILTER_ARR=$(echo $SOLR_CORE_FILTERS | sed "s/,/ /g")
@@ -204,12 +204,12 @@ function upgrade_index() {
 
 function upgrade_index_tool() {
   # see: https://cwiki.apache.org/confluence/display/solr/IndexUpgrader+Tool
-  : ${INDEX_VERSION:?"Please set the INDEX_VERSION variable! (6.6.2 or 7.3.1)"}
+  : ${INDEX_VERSION:?"Please set the INDEX_VERSION variable! (6.6.2 or 7.4.0)"}
   PATH=$JAVA_HOME/bin:$PATH $JVM -classpath "$DIR/migrate/lucene-core-$INDEX_VERSION.jar:$DIR/migrate/lucene-backward-codecs-$INDEX_VERSION.jar" org.apache.lucene.index.IndexUpgrader ${@}
 }
 
 function check_index_tool() {
-  : ${INDEX_VERSION:?"Please set the INDEX_VERSION variable! (6.6.2 or 7.3.1)"}
+  : ${INDEX_VERSION:?"Please set the INDEX_VERSION variable! (6.6.2 or 7.4.0)"}
   PATH=$JAVA_HOME/bin:$PATH $JVM -classpath "$DIR/migrate/lucene-core-$INDEX_VERSION.jar:$DIR/migrate/lucene-backward-codecs-$INDEX_VERSION.jar" org.apache.lucene.index.CheckIndex ${@}
 }
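
`upgrade_index_tool` and `check_index_tool` are thin wrappers around Lucene's IndexUpgrader and CheckIndex, selecting the lucene-core and lucene-backward-codecs jars that match INDEX_VERSION. A minimal sketch of what a 7.4.0 upgrade of a single core index boils down to (the jar directory and core index path below are illustrative placeholders, not taken from the patch):

```bash
# Roughly what upgrade_index_tool executes once INDEX_VERSION=7.4.0;
# DIR and the core index path are illustrative placeholders.
INDEX_VERSION=7.4.0
DIR=/usr/lib/ambari-infra-solr-client
java -classpath "$DIR/migrate/lucene-core-$INDEX_VERSION.jar:$DIR/migrate/lucene-backward-codecs-$INDEX_VERSION.jar" \
  org.apache.lucene.index.IndexUpgrader /opt/ambari_infra_solr/data/hadoop_logs_shard1_replica1/data/index
```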
 
diff --git a/ambari-infra/ambari-infra-solr-plugin/docker/infra-solr-docker-compose.sh b/ambari-infra/ambari-infra-solr-plugin/docker/infra-solr-docker-compose.sh
index 69d8e08..502d87a 100755
--- a/ambari-infra/ambari-infra-solr-plugin/docker/infra-solr-docker-compose.sh
+++ b/ambari-infra/ambari-infra-solr-plugin/docker/infra-solr-docker-compose.sh
@@ -72,7 +72,7 @@ AMBARI_LOCATION=$AMBARI_LOCATION
 ZOOKEEPER_VERSION=3.4.10
 ZOOKEEPER_CONNECTION_STRING=zookeeper:2181
 
-SOLR_VERSION=7.3.1
+SOLR_VERSION=7.4.0
 EOF
 }
 
diff --git a/ambari-infra/pom.xml b/ambari-infra/pom.xml
index 79bff9b..3b4df8c 100644
--- a/ambari-infra/pom.xml
+++ b/ambari-infra/pom.xml
@@ -25,7 +25,7 @@
 
   <properties>
     <jdk.version>1.8</jdk.version>
-    <solr.version>7.3.1</solr.version>
+    <solr.version>7.4.0</solr.version>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
     <python.ver>python &gt;= 2.6</python.ver>
     <deb.python.ver>python (&gt;= 2.6)</deb.python.ver>
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/configsets/audit_logs/conf/solrconfig.xml b/ambari-logsearch/ambari-logsearch-server/src/main/configsets/audit_logs/conf/solrconfig.xml
index a75070e..8f54121 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/configsets/audit_logs/conf/solrconfig.xml
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/configsets/audit_logs/conf/solrconfig.xml
@@ -35,7 +35,7 @@
        that you fully re-index after changing this setting as it can
        affect both how text is indexed and queried.
   -->
-  <luceneMatchVersion>7.3.1</luceneMatchVersion>
+  <luceneMatchVersion>7.4.0</luceneMatchVersion>
 
   <!-- <lib/> directives can be used to instruct Solr to load any Jars
        identified and use them to resolve any "plugins" specified in
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/configsets/hadoop_logs/conf/solrconfig.xml b/ambari-logsearch/ambari-logsearch-server/src/main/configsets/hadoop_logs/conf/solrconfig.xml
index 424ca89..67db2e1 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/configsets/hadoop_logs/conf/solrconfig.xml
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/configsets/hadoop_logs/conf/solrconfig.xml
@@ -35,7 +35,7 @@
        that you fully re-index after changing this setting as it can
        affect both how text is indexed and queried.
   -->
-  <luceneMatchVersion>7.3.1</luceneMatchVersion>
+  <luceneMatchVersion>7.4.0</luceneMatchVersion>
 
   <!-- <lib/> directives can be used to instruct Solr to load any Jars
        identified and use them to resolve any "plugins" specified in
diff --git a/ambari-logsearch/ambari-logsearch-server/src/main/configsets/history/conf/solrconfig.xml b/ambari-logsearch/ambari-logsearch-server/src/main/configsets/history/conf/solrconfig.xml
index 56822e4..866b218 100644
--- a/ambari-logsearch/ambari-logsearch-server/src/main/configsets/history/conf/solrconfig.xml
+++ b/ambari-logsearch/ambari-logsearch-server/src/main/configsets/history/conf/solrconfig.xml
@@ -16,7 +16,7 @@
  limitations under the License.
 -->
 <config>
-  <luceneMatchVersion>7.3.1</luceneMatchVersion>
+  <luceneMatchVersion>7.4.0</luceneMatchVersion>
 
   <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-dataimporthandler-.*\.jar" />
 
diff --git a/ambari-logsearch/docker/Dockerfile b/ambari-logsearch/docker/Dockerfile
index ca6ac5e..d076565 100644
--- a/ambari-logsearch/docker/Dockerfile
+++ b/ambari-logsearch/docker/Dockerfile
@@ -54,7 +54,7 @@ RUN echo 'X11DisplayOffset 10\n' /etc/ssh/sshd_config
 RUN git config --global url."https://".insteadOf git://
 
 # Install Solr
-ENV SOLR_VERSION 7.3.1
+ENV SOLR_VERSION 7.4.0
 RUN wget --no-check-certificate -O /root/solr-$SOLR_VERSION.tgz http://public-repo-1.hortonworks.com/ARTIFACTS/dist/lucene/solr/$SOLR_VERSION/solr-$SOLR_VERSION.tgz
 RUN cd /root && tar -zxvf /root/solr-$SOLR_VERSION.tgz
 
diff --git a/ambari-logsearch/docker/docker-compose.yml b/ambari-logsearch/docker/docker-compose.yml
index b73ee5c..fb14622 100644
--- a/ambari-logsearch/docker/docker-compose.yml
+++ b/ambari-logsearch/docker/docker-compose.yml
@@ -26,7 +26,7 @@ services:
       ZOO_MY_ID: 1
       ZOO_SERVERS: server.1=zookeeper:2888:3888
   solr:
-    image: solr:${SOLR_VERSION:-7.3.1}
+    image: solr:${SOLR_VERSION:-7.4.0}
     restart: always
     hostname: solr
     ports:
diff --git a/ambari-logsearch/docker/logsearch-docker.sh b/ambari-logsearch/docker/logsearch-docker.sh
index 866ce41..72a332a 100755
--- a/ambari-logsearch/docker/logsearch-docker.sh
+++ b/ambari-logsearch/docker/logsearch-docker.sh
@@ -109,7 +109,7 @@ AMBARI_LOCATION=$AMBARI_LOCATION
 ZOOKEEPER_VERSION=3.4.10
 ZOOKEEPER_CONNECTION_STRING=zookeeper:2181
 
-SOLR_VERSION=7.3.1
+SOLR_VERSION=7.4.0
 EOF
     echo ".env file has been created. Check it out before starting Log Search. ($sdir/.env)"
     exit
diff --git a/ambari-logsearch/docker/solr.yml b/ambari-logsearch/docker/solr.yml
index 59ac354..2975af6 100644
--- a/ambari-logsearch/docker/solr.yml
+++ b/ambari-logsearch/docker/solr.yml
@@ -15,7 +15,7 @@
 version: '3.3'
 services:
   solr:
-    image: solr:${SOLR_VERSION:-7.3.1}
+    image: solr:${SOLR_VERSION:-7.4.0}
     restart: always
     networks:
       - logsearch-network
diff --git a/ambari-logsearch/docker/sso.yml b/ambari-logsearch/docker/sso.yml
index 311e448..0837dd8 100644
--- a/ambari-logsearch/docker/sso.yml
+++ b/ambari-logsearch/docker/sso.yml
@@ -26,7 +26,7 @@ services:
       ZOO_MY_ID: 1
       ZOO_SERVERS: server.1=zookeeper:2888:3888
   solr:
-    image: solr:${SOLR_VERSION:-7.3.1}
+    image: solr:${SOLR_VERSION:-7.4.0}
     restart: always
     hostname: solr
     ports:
diff --git a/ambari-logsearch/pom.xml b/ambari-logsearch/pom.xml
index cbe644b..0f07bfa 100644
--- a/ambari-logsearch/pom.xml
+++ b/ambari-logsearch/pom.xml
@@ -45,7 +45,7 @@
     <deb.python.ver>python (&gt;= 2.6)</deb.python.ver>
     <deb.architecture>amd64</deb.architecture>
     <deb.dependency.list>${deb.python.ver}</deb.dependency.list>
-    <solr.version>7.3.1</solr.version>
+    <solr.version>7.4.0</solr.version>
     <hadoop.version>3.0.0</hadoop.version>
     <common.io.version>2.5</common.io.version>
     <zookeeper.version>3.4.6.2.3.0.0-2557</zookeeper.version>
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog271.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog271.java
index 854b358..ddb7541 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog271.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog271.java
@@ -21,9 +21,11 @@ import static org.apache.ambari.server.upgrade.UpgradeCatalog270.AMBARI_INFRA_NE
 import static org.apache.ambari.server.upgrade.UpgradeCatalog270.AMBARI_INFRA_OLD_NAME;
 
 import java.sql.SQLException;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
+import java.util.function.Function;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -54,6 +56,64 @@ public class UpgradeCatalog271 extends AbstractUpgradeCatalog {
    */
   private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog271.class);
 
+  private static final String SOLR_NEW_LOG4J2_XML = "<Configuration>\n" +
+    "  <Appenders>\n" +
+    "\n" +
+    "    <Console name=\"STDOUT\" target=\"SYSTEM_OUT\">\n" +
+    "      <PatternLayout>\n" +
+    "        <Pattern>\n" +
+    "          %d{ISO8601} [%t] %-5p [%X{collection} %X{shard} %X{replica} %X{core}] %C (%F:%L) - %m%n\n" +
+    "        </Pattern>\n" +
+    "      </PatternLayout>\n" +
+    "    </Console>\n" +
+    "\n" +
+    "    <RollingFile\n" +
+    "        name=\"RollingFile\"\n" +
+    "        fileName=\"{{infra_solr_log_dir}}/solr.log\"\n" +
+    "        filePattern=\"{{infra_solr_log_dir}}/solr.log.%i\" >\n" +
+    "      <PatternLayout>\n" +
+    "        <Pattern>\n" +
+    "          %d{ISO8601} [%t] %-5p [%X{collection} %X{shard} %X{replica} %X{core}] %C (%F:%L) - %m%n\n" +
+    "        </Pattern>\n" +
+    "      </PatternLayout>\n" +
+    "      <Policies>\n" +
+    "        <OnStartupTriggeringPolicy />\n" +
+    "        <SizeBasedTriggeringPolicy size=\"{{infra_log_maxfilesize}} MB\"/>\n" +
+    "      </Policies>\n" +
+    "      <DefaultRolloverStrategy max=\"{{infra_log_maxbackupindex}}\"/>\n" +
+    "    </RollingFile>\n" +
+    "\n" +
+    "    <RollingFile\n" +
+    "        name=\"SlowFile\"\n" +
+    "        fileName=\"{{infra_solr_log_dir}}/solr_slow_requests.log\"\n" +
+    "        filePattern=\"{{infra_solr_log_dir}}/solr_slow_requests.log.%i\" >\n" +
+    "      <PatternLayout>\n" +
+    "        <Pattern>\n" +
+    "          %d{ISO8601} [%t] %-5p [%X{collection} %X{shard} %X{replica} %X{core}] %C (%F:%L) - %m%n\n" +
+    "        </Pattern>\n" +
+    "      </PatternLayout>\n" +
+    "      <Policies>\n" +
+    "        <OnStartupTriggeringPolicy />\n" +
+    "        <SizeBasedTriggeringPolicy size=\"{{infra_log_maxfilesize}} MB\"/>\n" +
+    "      </Policies>\n" +
+    "      <DefaultRolloverStrategy max=\"{{infra_log_maxbackupindex}}\"/>\n" +
+    "    </RollingFile>\n" +
+    "\n" +
+    "  </Appenders>\n" +
+    "  <Loggers>\n" +
+    "    <Logger name=\"org.apache.hadoop\" level=\"warn\"/>\n" +
+    "    <Logger name=\"org.apache.solr.update.LoggingInfoStream\" level=\"off\"/>\n" +
+    "    <Logger name=\"org.apache.zookeeper\" level=\"warn\"/>\n" +
+    "    <Logger name=\"org.apache.solr.core.SolrCore.SlowRequest\" level=\"warn\" additivity=\"false\">\n" +
+    "      <AppenderRef ref=\"SlowFile\"/>\n" +
+    "    </Logger>\n" +
+    "\n" +
+    "    <Root level=\"warn\">\n" +
+    "      <AppenderRef ref=\"RollingFile\"/>\n" +
+    "      <!-- <AppenderRef ref=\"STDOUT\"/> -->\n" +
+    "    </Root>\n" +
+    "  </Loggers>\n" +
+    "</Configuration>";
   private static final String SERVICE_CONFIG_MAPPING_TABLE = "serviceconfigmapping";
   private static final String CLUSTER_CONFIG_TABLE = "clusterconfig";
   protected static final String CLUSTERS_TABLE = "clusters";
@@ -114,6 +174,7 @@ public class UpgradeCatalog271 extends AbstractUpgradeCatalog {
     updateRangerKmsDbUrl();
     renameAmbariInfraInConfigGroups();
     removeLogSearchPatternConfigs();
+    updateSolrConfigurations();
   }
 
   /**
@@ -270,4 +331,42 @@ public class UpgradeCatalog271 extends AbstractUpgradeCatalog {
         new DBAccessor.DBColumnInfo(CLUSTERS_BLUEPRINT_PROVISIONING_STATE_COLUMN, String.class, 255,
             BlueprintProvisioningState.NONE, true));
   }
+
+  /**
+   * Upgrade lucene version to 7.4.0 in Solr config of Log Search collections and Solr Log4j config
+   */
+  protected void updateSolrConfigurations() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+    Clusters clusters = ambariManagementController.getClusters();
+    if (clusters == null)
+      return;
+
+    Map<String, Cluster> clusterMap = clusters.getClusters();
+
+    if (clusterMap == null || clusterMap.isEmpty())
+      return;
+
+    for (final Cluster cluster : clusterMap.values()) {
+      updateConfig(cluster, "logsearch-service_logs-solrconfig", (content) -> updateLuceneMatchVersion(content,"7.4.0"));
+      updateConfig(cluster, "logsearch-audit_logs-solrconfig", (content) -> updateLuceneMatchVersion(content,"7.4.0"));
+      updateConfig(cluster, "infra-solr-log4j", (content) -> SOLR_NEW_LOG4J2_XML);
+    }
+  }
+
+  private void updateConfig(Cluster cluster, String configType, Function<String, String> contentUpdater) throws AmbariException {
+    Config config = cluster.getDesiredConfigByType(configType);
+    if (config == null)
+      return;
+    if (config.getProperties() == null || !config.getProperties().containsKey("content"))
+      return;
+
+    String content = config.getProperties().get("content");
+    content = contentUpdater.apply(content);
+    updateConfigurationPropertiesForCluster(cluster, configType, Collections.singletonMap("content", content), true, true);
+  }
+
+  private String updateLuceneMatchVersion(String content, String newLuceneMatchVersion) {
+    return content.replaceAll("<luceneMatchVersion>.*</luceneMatchVersion>",
+      "<luceneMatchVersion>" + newLuceneMatchVersion + "</luceneMatchVersion>");
+  }
 }
\ No newline at end of file
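
The new `updateSolrConfigurations()` step rewrites the stored `content` of the two Log Search solrconfig types with a `<luceneMatchVersion>` regex and replaces the `infra-solr-log4j` content wholesale with the Log4j2 template above. For a quick manual spot check of what that regex does to a config dumped from ZooKeeper, an equivalent substitution can be sketched with sed (illustration only; during upgrade the catalog itself performs the change):

```bash
# Hedged manual equivalent of UpgradeCatalog271.updateLuceneMatchVersion,
# applied to a locally saved copy of solrconfig.xml (file name is illustrative).
sed -i 's|<luceneMatchVersion>.*</luceneMatchVersion>|<luceneMatchVersion>7.4.0</luceneMatchVersion>|' solrconfig.xml
```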
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog271Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog271Test.java
index 63247da..5bf1317 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog271Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog271Test.java
@@ -20,6 +20,7 @@ package org.apache.ambari.server.upgrade;
 
 import static org.apache.ambari.server.upgrade.UpgradeCatalog271.CLUSTERS_BLUEPRINT_PROVISIONING_STATE_COLUMN;
 import static org.apache.ambari.server.upgrade.UpgradeCatalog271.CLUSTERS_TABLE;
+import static org.easymock.EasyMock.anyBoolean;
 import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.anyString;
 import static org.easymock.EasyMock.capture;
@@ -41,6 +42,7 @@ import java.util.Map;
 import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.controller.AmbariManagementControllerImpl;
 import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.dao.DaoUtils;
 import org.apache.ambari.server.state.BlueprintProvisioningState;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
@@ -92,6 +94,7 @@ public class UpgradeCatalog271Test {
     Method updateRangerKmsDbUrl = UpgradeCatalog271.class.getDeclaredMethod("updateRangerKmsDbUrl");
     Method renameAmbariInfraInConfigGroups = UpgradeCatalog271.class.getDeclaredMethod("renameAmbariInfraInConfigGroups");
     Method removeLogSearchPatternConfigs = UpgradeCatalog271.class.getDeclaredMethod("removeLogSearchPatternConfigs");
+    Method updateSolrConfigurations = UpgradeCatalog271.class.getDeclaredMethod("updateSolrConfigurations");
 
     UpgradeCatalog271 upgradeCatalog271 = createMockBuilder(UpgradeCatalog271.class)
       .addMockedMethod(updateRangerKmsDbUrl)
@@ -99,6 +102,7 @@ public class UpgradeCatalog271Test {
       .addMockedMethod(addNewConfigurationsFromXml)
       .addMockedMethod(renameAmbariInfraInConfigGroups)
       .addMockedMethod(removeLogSearchPatternConfigs)
+      .addMockedMethod(updateSolrConfigurations)
       .createMock();
 
     upgradeCatalog271.addNewConfigurationsFromXml();
@@ -116,6 +120,9 @@ public class UpgradeCatalog271Test {
     upgradeCatalog271.removeLogSearchPatternConfigs();
     expectLastCall().once();
 
+    upgradeCatalog271.updateSolrConfigurations();
+    expectLastCall().once();
+
     replay(upgradeCatalog271);
     upgradeCatalog271.executeDMLUpdates();
     verify(upgradeCatalog271);
@@ -292,4 +299,61 @@ public class UpgradeCatalog271Test {
     Assert.assertEquals(updatedRangerKmsEnvConfig.get("ranger_kms_privelege_user_jdbc_url"), "jdbc:mysql://c6401.ambari.apache.org:3546");
   }
 
+  @Test
+  public void testUpdateSolrConfigurations() throws Exception {
+    // GIVEN
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+
+    Clusters clusters = easyMockSupport.createNiceMock(Clusters.class);
+    final Cluster cluster = easyMockSupport.createNiceMock(Cluster.class);
+
+    Config mockedServiceLogSolrConfig = easyMockSupport.createNiceMock(Config.class);
+    Config mockedAudiitLogSolrConfig = easyMockSupport.createNiceMock(Config.class);
+    Config mockedSolrLog4JConfig = easyMockSupport.createNiceMock(Config.class);
+
+    Map<String, Config> allDummy = new HashMap<>();
+
+    Map<String, String> serviceLogProps = new HashMap<>();
+    serviceLogProps.put("content", "<luceneMatchVersion>7.3.1</luceneMatchVersion>");
+    Map<String, String> auditLogProps = new HashMap<>();
+    auditLogProps.put("content", "<luceneMatchVersion>7.3.1</luceneMatchVersion>");
+    Map<String, String> solrLog4jProps = new HashMap<>();
+    solrLog4jProps.put("content", "log4jContent");
+
+    Injector injector = easyMockSupport.createNiceMock(Injector.class);
+    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
+      .addMockedMethod("createConfiguration")
+      .addMockedMethod("getClusters", new Class[] { })
+      .addMockedMethod("createConfig")
+      .createNiceMock();
+
+    DaoUtils daoUtilsMock = easyMockSupport.createNiceMock(DaoUtils.class);
+    Map<String, Cluster> clusterMap = new HashMap<>();
+    clusterMap.put("cl1", cluster);
+    expect(injector.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
+    expect(injector.getInstance(DaoUtils.class)).andReturn(daoUtilsMock).anyTimes();
+    expect(controller.getClusters()).andReturn(clusters).anyTimes();
+    expect(clusters.getClusters()).andReturn(clusterMap).anyTimes();
+    expect(cluster.getDesiredConfigByType("logsearch-service_logs-solrconfig")).andReturn(mockedServiceLogSolrConfig);
+    expect(cluster.getDesiredConfigByType("logsearch-audit_logs-solrconfig")).andReturn(mockedAudiitLogSolrConfig);
+    expect(cluster.getDesiredConfigByType("infra-solr-log4j")).andReturn(mockedSolrLog4JConfig);
+    expect(mockedServiceLogSolrConfig.getProperties()).andReturn(serviceLogProps).anyTimes();
+    expect(mockedAudiitLogSolrConfig.getProperties()).andReturn(auditLogProps).anyTimes();
+    expect(mockedSolrLog4JConfig.getProperties()).andReturn(solrLog4jProps).anyTimes();
+    // WHEN
+    replay(daoUtilsMock, controller, injector, clusters, cluster, mockedServiceLogSolrConfig, mockedAudiitLogSolrConfig, mockedSolrLog4JConfig);
+    UpgradeCatalog271 underTest = createMockBuilder(UpgradeCatalog271.class)
+      .withConstructor(Injector.class)
+      .withArgs(injector)
+      .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class, String.class, Map.class, boolean.class, boolean.class)
+      .createNiceMock();
+    underTest.updateConfigurationPropertiesForCluster(anyObject(Cluster.class), anyString(), anyObject(), anyBoolean(), anyBoolean());
+    expectLastCall().times(3);
+    replay(underTest);
+    underTest.updateSolrConfigurations();
+    // THEN
+    easyMockSupport.verifyAll();
+  }
+
+
 }
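
The added test only mocks out `updateConfigurationPropertiesForCluster` and verifies that all three config types are rewritten, so it runs without a database. Assuming the usual Maven Surefire setup of the ambari-server module, it can be run in isolation with something like the following (module and flag choice are a sketch, not quoted from the patch):

```bash
# Run just the new upgrade catalog test from the ambari-server module (illustrative)
mvn -pl ambari-server test -Dtest=UpgradeCatalog271Test
```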