Posted to commits@ambari.apache.org by dm...@apache.org on 2015/10/12 14:21:28 UTC

[3/8] ambari git commit: AMBARI-13378. Stop-and-Start Upgrade: Merge feature branch to trunk. (dgrinenko via dlysnichenko)
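
The config-upgrade.xml added below holds reusable <definition> blocks keyed by id; the Stop-and-Start (express) upgrade pack is expected to reference those ids rather than repeating the configuration changes inline. A minimal sketch of such a reference, assuming a pack file such as nonrolling-upgrade-2.3.xml and an illustrative stage title (both are assumptions, not part of this diff):

  <!-- hypothetical excerpt from an express upgrade pack (e.g. nonrolling-upgrade-2.3.xml) -->
  <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger Admin">
    <!-- the id must match a <definition> id declared in config-upgrade.xml -->
    <task xsi:type="configure" id="hdp_2_3_0_0_update_ranger_admin"/>
  </execute-stage>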

http://git-wip-us.apache.org/repos/asf/ambari/blob/c58162fe/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
new file mode 100644
index 0000000..48f5d50
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
@@ -0,0 +1,807 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<upgrade-config-changes xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+
+  <services>
+
+    <service name="RANGER">
+      <component name="RANGER_ADMIN">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_3_0_0_update_ranger_env">
+            <type>ranger-env</type>
+            <set key="xml_configurations_supported" value="true" />
+          </definition>
+          <definition xsi:type="configure" id="hdp_2_3_0_0_update_ranger_admin" summary="Updating Ranger Admin">
+            <type>ranger-admin-site</type>
+            <transfer operation="copy" from-type="ranger-site" from-key="HTTPS_CLIENT_AUTH" to-key="ranger.service.https.attrib.clientAuth" default-value="" />
+            <transfer operation="copy" from-type="ranger-site" from-key="HTTPS_KEYSTORE_FILE" to-key="ranger.https.attrib.keystore.file" default-value="" />
+            <transfer operation="copy" from-type="ranger-site" from-key="HTTPS_KEYSTORE_PASS" to-key="ranger.service.https.attrib.keystore.pass" default-value="" mask="true" />
+            <transfer operation="copy" from-type="ranger-site" from-key="HTTPS_KEY_ALIAS" to-key="ranger.service.https.attrib.keystore.keyalias" default-value="" />
+            <transfer operation="copy" from-type="ranger-site" from-key="HTTPS_SERVICE_PORT" to-key="ranger.service.https.port" default-value="" />
+            <transfer operation="copy" from-type="ranger-site" from-key="HTTP_ENABLED" to-key="ranger.service.http.enabled" default-value="" />
+            <transfer operation="copy" from-type="ranger-site" from-key="HTTP_SERVICE_PORT" to-key="ranger.service.http.port" default-value="" />
+
+            <transfer operation="copy" from-type="admin-properties" from-key="authServiceHostName" to-key="ranger.unixauth.service.hostname" default-value="" />
+            <transfer operation="copy" from-type="admin-properties" from-key="authServicePort" to-key="ranger.unixauth.service.port" default-value="" />
+            <transfer operation="copy" from-type="admin-properties" from-key="authentication_method" to-key="ranger.authentication.method" default-value="" />
+            <transfer operation="copy" from-type="admin-properties" from-key="remoteLoginEnabled" to-key="ranger.unixauth.remote.login.enabled" default-value="" />
+            <transfer operation="copy" from-type="admin-properties" from-key="xa_ldap_url" to-key="ranger.ldap.url" default-value="" />
+            <transfer operation="copy" from-type="admin-properties" from-key="xa_ldap_userDNpattern" to-key="ranger.ldap.user.dnpattern" default-value="" />
+            <transfer operation="copy" from-type="admin-properties" from-key="xa_ldap_groupSearchBase" to-key="ranger.ldap.group.searchbase" default-value="" />
+            <transfer operation="copy" from-type="admin-properties" from-key="xa_ldap_groupSearchFilter" to-key="ranger.ldap.group.searchfilter" default-value="" />
+            <transfer operation="copy" from-type="admin-properties" from-key="xa_ldap_groupRoleAttribute" to-key="ranger.ldap.group.roleattribute" default-value="" />
+            <transfer operation="copy" from-type="admin-properties" from-key="xa_ldap_ad_domain" to-key="ranger.ldap.ad.domain" default-value="" />
+            <transfer operation="copy" from-type="admin-properties" from-key="xa_ldap_ad_url" to-key="ranger.ldap.ad.url" default-value="" />
+            <transfer operation="copy" from-type="admin-properties" from-key="db_user" to-key="ranger.jpa.jdbc.user" default-value="" />
+            <transfer operation="copy" from-type="admin-properties" from-key="db_password" to-key="ranger.jpa.jdbc.password" default-value="" mask="true" />
+            <transfer operation="copy" from-type="admin-properties" from-key="audit_db_user" to-key="ranger.jpa.audit.jdbc.user" default-value="" />
+            <transfer operation="copy" from-type="admin-properties" from-key="audit_db_password" to-key="ranger.jpa.audit.jdbc.password" default-value="" mask="true" />
+
+            <set key="ranger.externalurl" value="{{ranger_external_url}}" />
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_3_0_0_update_ranger_usersync" summary="Updating Ranger Usersync">
+            <type>ranger-ugsync-site</type>
+            <transfer operation="copy" from-type="usersync-properties" from-key="CRED_KEYSTORE_FILENAME" to-key="ranger.usersync.credstore.filename" default-value="/etc/ranger/usersync/ugsync.jceks" />
+            <transfer operation="copy" from-type="usersync-properties" from-key="MIN_UNIX_USER_ID_TO_SYNC" to-key="ranger.usersync.unix.minUserId" default-value="" />
+            <transfer operation="copy" from-type="usersync-properties" from-key="SYNC_INTERVAL" to-key="ranger.usersync.sleeptimeinmillisbetweensynccycle" default-value="60000" />
+            <transfer operation="copy" from-type="usersync-properties" from-key="SYNC_LDAP_BIND_DN" to-key="ranger.usersync.ldap.binddn" default-value="" />
+            <transfer operation="copy" from-type="usersync-properties" from-key="SYNC_LDAP_BIND_PASSWORD" to-key="ranger.usersync.ldap.ldapbindpassword" default-value="" mask="true" />
+            <transfer operation="copy" from-type="usersync-properties" from-key="SYNC_LDAP_GROUPNAME_CASE_CONVERSION" to-key="ranger.usersync.ldap.groupname.caseconversion" default-value="" />
+            <transfer operation="copy" from-type="usersync-properties" from-key="SYNC_LDAP_URL" to-key="ranger.usersync.ldap.url" default-value="" />
+            <transfer operation="copy" from-type="usersync-properties" from-key="SYNC_LDAP_USERNAME_CASE_CONVERSION" to-key="ranger.usersync.ldap.username.caseconversion" default-value="" />
+            <transfer operation="copy" from-type="usersync-properties" from-key="SYNC_LDAP_USER_GROUP_NAME_ATTRIBUTE" to-key="ranger.usersync.ldap.user.groupnameattribute" default-value="" />
+            <transfer operation="copy" from-type="usersync-properties" from-key="SYNC_LDAP_USER_NAME_ATTRIBUTE" to-key="ranger.usersync.ldap.user.nameattribute" default-value="" />
+            <transfer operation="copy" from-type="usersync-properties" from-key="SYNC_LDAP_USER_OBJECT_CLASS" to-key="ranger.usersync.ldap.user.objectclass" default-value="" />
+            <transfer operation="copy" from-type="usersync-properties" from-key="SYNC_LDAP_USER_SEARCH_BASE" to-key="ranger.usersync.ldap.user.searchbase" default-value="" />
+            <transfer operation="copy" from-type="usersync-properties" from-key="SYNC_LDAP_USER_SEARCH_FILTER" to-key="ranger.usersync.ldap.user.searchfilter" default-value="" />
+            <transfer operation="copy" from-type="usersync-properties" from-key="SYNC_LDAP_USER_SEARCH_SCOPE" to-key="ranger.usersync.ldap.user.searchscope" default-value="" />
+            <transfer operation="copy" from-type="usersync-properties" from-key="logdir" to-key="ranger.usersync.logdir" default-value="" />
+            <transfer operation="copy" from-type="usersync-properties" from-key="SYNC_SOURCE" to-key="ranger.usersync.sync.source" default-value="unix" />
+            <transfer operation="copy" from-type="usersync-properties" from-key="POLICY_MGR_URL" to-key="ranger.usersync.policymanager.baseURL" default-value="{{ranger_external_url}}" />
+            <set key="ranger.usersync.source.impl.class" value="" />
+            <set key="ranger.usersync.ldap.searchBase" value="" />
+            <set key="ranger.usersync.group.memberattributename" value="" />
+            <set key="ranger.usersync.group.nameattribute" value="" />
+            <set key="ranger.usersync.group.objectclass" value="" />
+            <set key="ranger.usersync.group.searchbase" value="" />
+            <set key="ranger.usersync.group.searchenabled" value="" />
+            <set key="ranger.usersync.group.searchfilter" value="" />
+            <set key="ranger.usersync.group.searchscope" value="" />
+            <set key="ranger.usersync.group.usermapsyncenabled" value="" />
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_3_0_0_update_ranger_site">
+            <type>ranger-site</type>
+            <transfer operation="delete" delete-key="HTTPS_CLIENT_AUTH" />
+            <transfer operation="delete" delete-key="HTTPS_KEYSTORE_FILE" />
+            <transfer operation="delete" delete-key="HTTPS_KEYSTORE_PASS" />
+            <transfer operation="delete" delete-key="HTTPS_KEY_ALIAS" />
+            <transfer operation="delete" delete-key="HTTPS_SERVICE_PORT" />
+            <transfer operation="delete" delete-key="HTTP_ENABLED" />
+            <transfer operation="delete" delete-key="HTTP_SERVICE_PORT" />
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_3_0_0_update_ranger_usersync_properties">
+            <type>usersync-properties</type>
+            <transfer operation="delete" delete-key="CRED_KEYSTORE_FILENAME" />
+            <transfer operation="delete" delete-key="MIN_UNIX_USER_ID_TO_SYNC" />
+            <transfer operation="delete" delete-key="SYNC_INTERVAL" />
+            <transfer operation="delete" delete-key="SYNC_LDAP_BIND_DN" />
+            <transfer operation="delete" delete-key="SYNC_LDAP_BIND_PASSWORD" />
+            <transfer operation="delete" delete-key="SYNC_LDAP_GROUPNAME_CASE_CONVERSION" />
+            <transfer operation="delete" delete-key="SYNC_LDAP_URL" />
+            <transfer operation="delete" delete-key="SYNC_LDAP_USERNAME_CASE_CONVERSION" />
+            <transfer operation="delete" delete-key="SYNC_LDAP_USER_GROUP_NAME_ATTRIBUTE" />
+            <transfer operation="delete" delete-key="SYNC_LDAP_USER_NAME_ATTRIBUTE" />
+            <transfer operation="delete" delete-key="SYNC_LDAP_USER_OBJECT_CLASS" />
+            <transfer operation="delete" delete-key="SYNC_LDAP_USER_SEARCH_BASE" />
+            <transfer operation="delete" delete-key="SYNC_LDAP_USER_SEARCH_FILTER" />
+            <transfer operation="delete" delete-key="SYNC_LDAP_USER_SEARCH_SCOPE" />
+            <transfer operation="delete" delete-key="logdir" />
+            <transfer operation="delete" delete-key="SYNC_SOURCE" />
+            <transfer operation="delete" delete-key="POLICY_MGR_URL" />
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_3_0_0_update_ranger_delete_oracle_home">
+            <type>ranger-env</type>
+            <transfer operation="delete" delete-key="oracle_home" />
+          </definition>
+          
+        </changes>
+      </component>
+    </service>
+
+    <service name="HDFS">
+      <component name="NAMENODE">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_3_0_0_modify_hadoop_env" summary="Modify hadoop-env.sh">
+            <type>hadoop-env</type>
+            <replace key="content" find="# Add libraries required by nodemanager" replace-with="" />
+            <replace key="content" find="MAPREDUCE_LIBS={{mapreduce_libs_path}}" replace-with="" />
+            <replace key="content" find=":${MAPREDUCE_LIBS}" replace-with="" />
+            <replace key="content" find=":/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/" replace-with="" />
+            <replace key="content" find=":/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/usr/hdp/current/tez-client/conf/" replace-with="" />
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_3_0_0_hdfs_adjust_ranger_plugin">
+            <condition type="ranger-hdfs-plugin-properties" key="ranger-hdfs-plugin-enabled" value="Yes">
+              <type>hdfs-site</type>
+              <key>dfs.namenode.inode.attributes.provider.class</key>
+              <value>org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer</value>
+            </condition>
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_3_0_0_hdfs_transition_ranger_hdfs_policy" summary="Transitioning Ranger HDFS Policy">
+            <type>ranger-hdfs-policymgr-ssl</type>
+            <transfer operation="copy" from-type="ranger-hdfs-plugin-properties" from-key="SSL_KEYSTORE_FILE_PATH" to-key="xasecure.policymgr.clientssl.keystore" default-value="/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks" />
+            <transfer operation="copy" from-type="ranger-hdfs-plugin-properties" from-key="SSL_KEYSTORE_PASSWORD" to-key="xasecure.policymgr.clientssl.keystore.password" mask="true" default-value="myKeyFilePassword" />
+            <transfer operation="copy" from-type="ranger-hdfs-plugin-properties" from-key="SSL_TRUSTSTORE_FILE_PATH" to-key="xasecure.policymgr.clientssl.truststore" default-value="/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks" />
+            <transfer operation="copy" from-type="ranger-hdfs-plugin-properties" from-key="SSL_TRUSTSTORE_PASSWORD" to-key="xasecure.policymgr.clientssl.truststore.password" mask="true" default-value="changeit" />
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_3_0_0_hdfs_transition_ranger_hdfs_audit" summary="Transitioning Ranger HDFS Audit">
+            <type>ranger-hdfs-audit</type>
+            <transfer operation="copy" from-type="ranger-hdfs-plugin-properties" from-key="XAAUDIT.DB.IS_ENABLED" to-key="xasecure.audit.destination.db" default-value="false"/>
+            <transfer operation="copy" from-type="ranger-hdfs-plugin-properties" from-key="XAAUDIT.HDFS.DESTINATION_DIRECTORY" to-key="xasecure.audit.destination.hdfs.dir" default-value="hdfs://NAMENODE_HOSTNAME:8020/ranger/audit" />
+            <transfer operation="copy" from-type="ranger-hdfs-plugin-properties" from-key="XAAUDIT.HDFS.IS_ENABLED" to-key="xasecure.audit.destination.hdfs" default-value="true" />
+            <transfer operation="copy" from-type="ranger-hdfs-plugin-properties" from-key="XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY" to-key="xasecure.audit.destination.hdfs.batch.filespool.dir" default-value="/var/log/hadoop/hdfs/audit/hdfs/spool" />
+            <transfer operation="copy" from-type="ranger-hdfs-plugin-properties" from-key="XAAUDIT.DB.USER_NAME" to-key="xasecure.audit.destination.db.user" default-value="" />
+            <transfer operation="copy" from-type="ranger-hdfs-plugin-properties" from-key="XAAUDIT.DB.PASSWORD" to-key="xasecure.audit.destination.db.password" mask="true" default-value="" />
+            <set key="xasecure.audit.credential.provider.file" value="jceks://file{{credential_file}}"/>
+            <set key="xasecure.audit.destination.solr" value="false" />
+            <set key="xasecure.audit.destination.solr.urls" value="{{ranger_audit_solr_urls}}" />
+            <set key="xasecure.audit.destination.solr.zookeepers" value="none" />
+            <set key="xasecure.audit.destination.solr.batch.filespool.dir" value="/var/log/hadoop/hdfs/audit/solr/spool" />
+            <set key="xasecure.audit.destination.db.jdbc.driver" value="{{jdbc_driver}}" />
+            <set key="xasecure.audit.destination.db.jdbc.url" value="{{audit_jdbc_url}}" />
+            <set key="xasecure.audit.provider.summary.enabled" value="false" />
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_3_0_0_hdfs_transition_ranger_hdfs_security" summary="Transitioning Ranger HDFS Security">
+            <type>ranger-hdfs-security</type>
+            <transfer operation="copy" from-type="ranger-hdfs-plugin-properties" from-key="REPOSITORY_NAME" to-key="ranger.plugin.hdfs.service.name" default-value="{{repo_name}}" />
+            <transfer operation="copy" from-type="ranger-hdfs-plugin-properties" from-key="POLICY_MGR_URL" to-key="ranger.plugin.hdfs.policy.rest.url" default-value="{{policymgr_mgr_url}}" />
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_3_0_0_hdfs_ranger_hdfs_delete_old_properties">
+            <type>ranger-hdfs-plugin-properties</type>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.DESTINTATION_FILE" />
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS" />
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS" />
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS" />
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT" />
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY" />
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.LOCAL_BUFFER_FILE" />
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS" />
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS" />
+            <transfer operation="delete" delete-key="SQL_CONNECTOR_JAR" />
+            <transfer operation="delete" delete-key="SSL_KEYSTORE_FILE_PATH" />
+            <transfer operation="delete" delete-key="SSL_KEYSTORE_PASSWORD" />
+            <transfer operation="delete" delete-key="SSL_TRUSTSTORE_FILE_PATH" />
+            <transfer operation="delete" delete-key="SSL_TRUSTSTORE_PASSWORD" />
+            <transfer operation="delete" delete-key="XAAUDIT.DB.IS_ENABLED" />
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.DESTINATION_DIRECTORY" />
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.IS_ENABLED" />
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY" />
+            <transfer operation="delete" delete-key="XAAUDIT.DB.USER_NAME" />
+            <transfer operation="delete" delete-key="XAAUDIT.DB.PASSWORD" />
+            <transfer operation="delete" delete-key="REPOSITORY_NAME" />
+            <transfer operation="delete" delete-key="POLICY_MGR_URL" />
+            <transfer operation="delete" delete-key="XAAUDIT.DB.FLAVOUR" />
+            <transfer operation="delete" delete-key="XAAUDIT.DB.DATABASE_NAME" />
+            <transfer operation="delete" delete-key="XAAUDIT.DB.HOSTNAME" />
+          </definition>
+
+        </changes>
+      </component>
+    </service>
+
+    <service name="MAPREDUCE2">
+      <component name="HISTORYSERVER">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_3_0_0_mapreduce2_adjust_history_server">
+            <type>mapred-site</type>
+            <transfer operation="move" from-key="mapreduce.job.speculative.speculativecap" to-key="mapreduce.job.speculative.speculative-cap-running-tasks" default-value="0.1"/>
+            <transfer operation="delete" delete-key="mapreduce.task.tmp.dir" />
+            <set key="mapreduce.fileoutputcommitter.algorithm.version" value="1"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
+
+    <service name="YARN">
+      <component name="APP_TIMELINE_SERVER">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_3_0_0_yarn_ats_enable_recovery">
+            <type>yarn-site</type>
+            <set key="yarn.timeline-service.recovery.enabled" value="true"/>
+            <set key="yarn.timeline-service.state-store-class" value="org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore"/>
+            <transfer operation="copy" from-key="yarn.timeline-service.leveldb-timeline-store.path" to-key="yarn.timeline-service.leveldb-state-store.path" default-value="/hadoop/yarn/timeline"/>
+          </definition>
+        </changes>
+      </component>
+
+      <component name="RESOURCEMANAGER">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_3_0_0_yarn_rm_disable_node_labels">
+            <type>yarn-site</type>
+            <set key="yarn.node-labels.enabled" value="false"/>
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_3_0_0_yarn_rm_clear_default_node_label_expression">
+            <type>capacity-scheduler</type>
+            <set key="yarn.scheduler.capacity.root.default-node-label-expression" value=""/>
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_3_0_0_yarn_rm_check_cs_root_def_capacity" summary="Deleting the Capacity Scheduler root default capacity">
+            <type>capacity-scheduler</type>
+            <transfer operation="delete" delete-key="yarn.scheduler.capacity.root.accessible-node-labels.default.capacity"/>
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_3_0_0_yarn_rm_check_cs_root_max_capacity" summary="Deleting the Capacity Scheduler root maximum capacity">
+            <type>capacity-scheduler</type>
+            <transfer operation="delete" delete-key="yarn.scheduler.capacity.root.accessible-node-labels.default.maximum-capacity"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
+
+    <service name="HBASE">
+      <component name="HBASE_MASTER">
+        <changes>
+          <definition xsi:type="configure"
+                id="hdp_2_3_0_0_hbase_master_adjust_phoenix_scheduler_factory">
+            <condition type="hbase-env" key="phoenix_sql_enabled" value="true">
+              <type>hbase-site</type>
+              <key>hbase.region.server.rpc.scheduler.factory.class</key>
+              <value>org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory
+              </value>
+            </condition>
+          </definition>
+
+          <definition xsi:type="configure"
+                id="hdp_2_3_0_0_hbase_master_adjust_phoenix_rpc_controller_factory">
+            <condition type="hbase-env" key="phoenix_sql_enabled" value="true">
+              <type>hbase-site</type>
+              <key>hbase.rpc.controllerfactory.class</key>
+              <value>
+                org.apache.hadoop.hbase.ipc.controller.ServerRpcControllerFactory
+              </value>
+            </condition>
+          </definition>
+
+          <definition xsi:type="configure"
+                id="hdp_2_3_0_0_hbase_master_set_global_memstore_size">
+            <type>hbase-site</type>
+            <transfer operation="copy" from-type="hbase-site"
+                      from-key="hbase.regionserver.global.memstore.upperLimit"
+                      to-key="hbase.regionserver.global.memstore.size"
+                      default-value="0.4"/>
+          </definition>
+
+          <definition xsi:type="configure"
+                id="hdp_2_3_0_0_hbase_master_adjust_phoenix_indexed_wal_edit_codec">
+            <condition type="hbase-env" key="phoenix_sql_enabled" value="true">
+              <type>hbase-site</type>
+              <key>hbase.regionserver.wal.codec</key>
+              <value>
+                org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec
+              </value>
+            </condition>
+          </definition>
+
+          <definition xsi:type="configure"
+                id="hdp_2_3_0_0_hbase_master_adjust_authorization_coprocessors"
+                summary="Updating Authorization Coprocessors">
+            <type>hbase-site</type>
+            <replace key="hbase.coprocessor.master.classes"
+                     find="com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor"
+                     replace-with="org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor"/>
+            <replace key="hbase.coprocessor.region.classes"
+                     find="com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor"
+                     replace-with="org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor"/>
+          </definition>
+
+          <definition xsi:type="configure"
+                id="hdp_2_3_0_0_hbase_master_transition_ranger_hbase_policy"
+                summary="Transitioning Ranger HBase Policy">
+            <type>ranger-hbase-policymgr-ssl</type>
+            <transfer operation="copy"
+                      from-type="ranger-hbase-plugin-properties"
+                      from-key="SSL_KEYSTORE_FILE_PATH"
+                      to-key="xasecure.policymgr.clientssl.keystore"
+                      default-value="/usr/hdp/current/hbase-client/conf/ranger-plugin-keystore.jks"/>
+            <transfer operation="copy"
+                      from-type="ranger-hbase-plugin-properties"
+                      from-key="SSL_KEYSTORE_PASSWORD"
+                      to-key="xasecure.policymgr.clientssl.keystore.password"
+                      mask="true" default-value="myKeyFilePassword"/>
+            <transfer operation="copy"
+                      from-type="ranger-hbase-plugin-properties"
+                      from-key="SSL_TRUSTSTORE_FILE_PATH"
+                      to-key="xasecure.policymgr.clientssl.truststore"
+                      default-value="/usr/hdp/current/hbase-client/conf/ranger-plugin-truststore.jks"/>
+            <transfer operation="copy"
+                      from-type="ranger-hbase-plugin-properties"
+                      from-key="SSL_TRUSTSTORE_PASSWORD"
+                      to-key="xasecure.policymgr.clientssl.truststore.password"
+                      mask="true" default-value="changeit"/>
+          </definition>
+
+          <definition xsi:type="configure"
+                id="hdp_2_3_0_0_hbase_master_transition_ranger_hbase_audit"
+                summary="Transitioning Ranger HBase Audit">
+            <type>ranger-hbase-audit</type>
+            <transfer operation="copy"
+                      from-type="ranger-hbase-plugin-properties"
+                      from-key="XAAUDIT.DB.IS_ENABLED"
+                      to-key="xasecure.audit.destination.db"
+                      default-value="false"/>
+            <transfer operation="copy"
+                      from-type="ranger-hbase-plugin-properties"
+                      from-key="XAAUDIT.HDFS.DESTINATION_DIRECTORY"
+                      to-key="xasecure.audit.destination.hdfs.dir"
+                      default-value="hdfs://NAMENODE_HOSTNAME:8020/ranger/audit"/>
+            <transfer operation="copy"
+                      from-type="ranger-hbase-plugin-properties"
+                      from-key="XAAUDIT.HDFS.IS_ENABLED"
+                      to-key="xasecure.audit.destination.hdfs"
+                      default-value="true"/>
+            <transfer operation="copy"
+                      from-type="ranger-hbase-plugin-properties"
+                      from-key="XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY"
+                      to-key="xasecure.audit.destination.hdfs.batch.filespool.dir"
+                      default-value="/var/log/hbase/audit/hdfs/spool"/>
+            <transfer operation="copy"
+                      from-type="ranger-hbase-plugin-properties"
+                      from-key="XAAUDIT.DB.USER_NAME"
+                      to-key="xasecure.audit.destination.db.user"
+                      default-value=""/>
+            <transfer operation="copy"
+                      from-type="ranger-hbase-plugin-properties"
+                      from-key="XAAUDIT.DB.PASSWORD"
+                      to-key="xasecure.audit.destination.db.password"
+                      mask="true" default-value=""/>
+            <set key="xasecure.audit.credential.provider.file"
+                 value="jceks://file{{credential_file}}"/>
+            <set key="xasecure.audit.destination.solr" value="false"/>
+            <set key="xasecure.audit.destination.solr.urls"
+                 value="{{ranger_audit_solr_urls}}"/>
+            <set key="xasecure.audit.destination.solr.zookeepers" value="none"/>
+            <set key="xasecure.audit.destination.solr.batch.filespool.dir"
+                 value="/var/log/hbase/audit/solr/spool"/>
+            <set key="xasecure.audit.destination.db.jdbc.driver"
+                 value="{{jdbc_driver}}"/>
+            <set key="xasecure.audit.destination.db.jdbc.url"
+                 value="{{audit_jdbc_url}}"/>
+            <set key="xasecure.audit.provider.summary.enabled" value="true"/>
+          </definition>
+
+          <definition xsi:type="configure"
+                id="hdp_2_3_0_0_hbase_master_copy_ranger_policies">
+            <type>ranger-hbase-security</type>
+            <transfer operation="copy"
+                      from-type="ranger-hbase-plugin-properties"
+                      from-key="UPDATE_XAPOLICIES_ON_GRANT_REVOKE"
+                      to-key="xasecure.hbase.update.xapolicies.on.grant.revoke"
+                      default-value="true"/>
+            <transfer operation="copy"
+                      from-type="ranger-hbase-plugin-properties"
+                      from-key="POLICY_MGR_URL"
+                      to-key="ranger.plugin.hbase.policy.rest.url"
+                      default-value="{{policymgr_mgr_url}}"/>
+            <transfer operation="copy"
+                      from-type="ranger-hbase-plugin-properties"
+                      from-key="REPOSITORY_NAME"
+                      to-key="ranger.plugin.hbase.service.name"
+                      default-value="{{repo_name}}"/>
+          </definition>
+
+          <definition xsi:type="configure"
+                id="hdp_2_3_0_0_hbase_master_delete_old_ranger_properties">
+            <type>ranger-hbase-plugin-properties</type>
+            <transfer operation="delete"
+                      delete-key="XAAUDIT.HDFS.DESTINTATION_FILE"/>
+            <transfer operation="delete"
+                      delete-key="XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS"/>
+            <transfer operation="delete"
+                      delete-key="XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS"/>
+            <transfer operation="delete"
+                      delete-key="XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS"/>
+            <transfer operation="delete"
+                      delete-key="XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT"/>
+            <transfer operation="delete"
+                      delete-key="XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY"/>
+            <transfer operation="delete"
+                      delete-key="XAAUDIT.HDFS.LOCAL_BUFFER_FILE"/>
+            <transfer operation="delete"
+                      delete-key="XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS"/>
+            <transfer operation="delete"
+                      delete-key="XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS"/>
+            <transfer operation="delete" delete-key="SQL_CONNECTOR_JAR"/>
+            <transfer operation="delete" delete-key="REPOSITORY_NAME"/>
+            <transfer operation="delete" delete-key="POLICY_MGR_URL"/>
+            <transfer operation="delete"
+                      delete-key="UPDATE_XAPOLICIES_ON_GRANT_REVOKE"/>
+            <transfer operation="delete" delete-key="SSL_KEYSTORE_FILE_PATH"/>
+            <transfer operation="delete" delete-key="SSL_KEYSTORE_PASSWORD"/>
+            <transfer operation="delete" delete-key="SSL_TRUSTSTORE_FILE_PATH"/>
+            <transfer operation="delete" delete-key="SSL_TRUSTSTORE_PASSWORD"/>
+            <transfer operation="delete" delete-key="XAAUDIT.DB.IS_ENABLED"/>
+            <transfer operation="delete"
+                      delete-key="XAAUDIT.HDFS.DESTINATION_DIRECTORY"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.IS_ENABLED"/>
+            <transfer operation="delete"
+                      delete-key="XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY"/>
+            <transfer operation="delete" delete-key="XAAUDIT.DB.USER_NAME"/>
+            <transfer operation="delete" delete-key="XAAUDIT.DB.PASSWORD"/>
+            <transfer operation="delete" delete-key="XAAUDIT.DB.FLAVOUR"/>
+            <transfer operation="delete" delete-key="XAAUDIT.DB.DATABASE_NAME"/>
+            <transfer operation="delete" delete-key="XAAUDIT.DB.HOSTNAME"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
+
+    <service name="TEZ">
+      <component name="TEZ_CLIENT">
+        <changes>
+          <definition xsi:type="configure"
+                id="hdp_2_3_0_0_tez_client_adjust_properties">
+            <type>tez-site</type>
+            <set key="tez.am.view-acls" value="*"/>
+            <set key="tez.task.generate.counters.per.io" value="true"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
+
+    <service name="HIVE">
+      <component name="HIVE_SERVER">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_3_0_0_hive_server_set_transport_mode">
+            <condition type="hive-site" key="hive.server2.transport.mode" value="binary">
+              <type>hive-site</type>
+              <key>hive.server2.thrift.port</key>
+              <value>10010</value>
+            </condition>
+            <condition type="hive-site" key="hive.server2.transport.mode" value="http">
+              <type>hive-site</type>
+              <key>hive.server2.http.port</key>
+              <value>10011</value>
+            </condition>
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_3_0_0_hive_server_replace_auth_manager" summary="Update Hive Authentication Manager">
+            <type>hiveserver2-site</type>
+            <replace key="hive.security.authorization.manager" find="com.xasecure.authorization.hive.authorizer.XaSecureHiveAuthorizerFactory" replace-with="org.apache.ranger.authorization.hive.authorizer.RangerHiveAuthorizerFactory" />
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_3_0_0_hive_server_configure_authentification" summary="Configuring hive authentication">
+            <type>hive-site</type>
+            <transfer operation="delete" delete-key="hive.server2.authentication.ldap.url" if-key="hive.server2.authentication" if-type="hive-site" if-value="NONE"/>
+            <transfer operation="delete" delete-key="hive.server2.authentication.ldap.baseDN" if-key="hive.server2.authentication" if-type="hive-site" if-value="NONE"/>
+            <transfer operation="delete" delete-key="hive.server2.authentication.pam.services" if-key="hive.server2.authentication" if-type="hive-site" if-value="NONE"/>
+            <transfer operation="delete" delete-key="hive.server2.custom.authentication.class" if-key="hive.server2.authentication" if-type="hive-site" if-value="NONE"/>
+            <transfer operation="delete" delete-key="hive.server2.authentication.kerberos.keytab" if-key="hive.server2.authentication" if-type="hive-site" if-value="NONE"/>
+            <transfer operation="delete" delete-key="hive.server2.authentication.kerberos.principal" if-key="hive.server2.authentication" if-type="hive-site" if-value="NONE"/>
+
+            <transfer operation="delete" delete-key="hive.server2.authentication.kerberos.keytab" if-key="hive.server2.authentication" if-type="hive-site" if-value="ldap"/>
+            <transfer operation="delete" delete-key="hive.server2.authentication.kerberos.principal" if-key="hive.server2.authentication" if-type="hive-site" if-value="ldap"/>
+            <transfer operation="delete" delete-key="hive.server2.authentication.pam.services" if-key="hive.server2.authentication" if-type="hive-site" if-value="ldap"/>
+            <transfer operation="delete" delete-key="hive.server2.custom.authentication.class" if-key="hive.server2.authentication" if-type="hive-site" if-value="ldap"/>
+
+            <transfer operation="delete" delete-key="hive.server2.authentication.ldap.url" if-key="hive.server2.authentication" if-type="hive-site" if-value="kerberos"/>
+            <transfer operation="delete" delete-key="hive.server2.authentication.ldap.baseDN" if-key="hive.server2.authentication" if-type="hive-site" if-value="kerberos"/>
+            <transfer operation="delete" delete-key="hive.server2.authentication.pam.services" if-key="hive.server2.authentication" if-type="hive-site" if-value="kerberos"/>
+            <transfer operation="delete" delete-key="hive.server2.custom.authentication.class" if-key="hive.server2.authentication" if-type="hive-site" if-value="kerberos"/>
+
+            <transfer operation="delete" delete-key="hive.server2.authentication.ldap.url" if-key="hive.server2.authentication" if-type="hive-site" if-value="pam"/>
+            <transfer operation="delete" delete-key="hive.server2.authentication.ldap.baseDN" if-key="hive.server2.authentication" if-type="hive-site" if-value="pam"/>
+            <transfer operation="delete" delete-key="hive.server2.custom.authentication.class" if-key="hive.server2.authentication" if-type="hive-site" if-value="pam"/>
+            <transfer operation="delete" delete-key="hive.server2.authentication.kerberos.keytab" if-key="hive.server2.authentication" if-type="hive-site" if-value="pam"/>
+            <transfer operation="delete" delete-key="hive.server2.authentication.kerberos.principal" if-key="hive.server2.authentication" if-type="hive-site" if-value="pam"/>
+
+            <transfer operation="delete" delete-key="hive.server2.authentication.ldap.url" if-key="hive.server2.authentication" if-type="hive-site" if-value="custom"/>
+            <transfer operation="delete" delete-key="hive.server2.authentication.ldap.baseDN" if-key="hive.server2.authentication" if-type="hive-site" if-value="custom"/>
+            <transfer operation="delete" delete-key="hive.server2.authentication.pam.services" if-key="hive.server2.authentication" if-type="hive-site" if-value="custom"/>
+            <transfer operation="delete" delete-key="hive.server2.authentication.kerberos.keytab" if-key="hive.server2.authentication" if-type="hive-site" if-value="custom"/>
+            <transfer operation="delete" delete-key="hive.server2.authentication.kerberos.principal" if-key="hive.server2.authentication" if-type="hive-site" if-value="custom"/>
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_3_0_0_hive_server_configure_ranger_policy" summary="Configuring Ranger Hive Policy">
+            <type>ranger-hive-policymgr-ssl</type>
+            <transfer operation="copy" from-type="ranger-hive-plugin-properties" from-key="SSL_KEYSTORE_FILE_PATH" to-key="xasecure.policymgr.clientssl.keystore" default-value="/usr/hdp/current/hive-server2/conf/ranger-plugin-keystore.jks"/>
+            <transfer operation="copy" from-type="ranger-hive-plugin-properties" from-key="SSL_KEYSTORE_PASSWORD" to-key="xasecure.policymgr.clientssl.keystore.password" default-value="myKeyFilePassword" mask="true"/>
+            <transfer operation="copy" from-type="ranger-hive-plugin-properties" from-key="SSL_TRUSTSTORE_FILE_PATH" to-key="xasecure.policymgr.clientssl.truststore" default-value="/usr/hdp/current/hive-server2/conf/ranger-plugin-truststore.jks"/>
+            <transfer operation="copy" from-type="ranger-hive-plugin-properties" from-key="SSL_TRUSTSTORE_PASSWORD" to-key="xasecure.policymgr.clientssl.truststore.password" default-value="changeit" mask="true"/>
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_3_0_0_hive_server_configure_ranger_security" summary="Configuring Ranger Hive Security">
+            <type>ranger-hive-security</type>
+            <transfer operation="copy" from-type="ranger-hive-plugin-properties" from-key="UPDATE_XAPOLICIES_ON_GRANT_REVOKE" to-key="xasecure.hive.update.xapolicies.on.grant.revoke" default-value="true"/>
+            <transfer operation="copy" from-type="ranger-hive-plugin-properties" from-key="POLICY_MGR_URL" to-key="ranger.plugin.hive.policy.rest.url" default-value="{{policymgr_mgr_url}}"/>
+            <transfer operation="copy" from-type="ranger-hive-plugin-properties" from-key="REPOSITORY_NAME" to-key="ranger.plugin.hive.service.name" default-value="{{repo_name}}"/>
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_3_0_0_hive_server_configure_ranger_audit" summary="Configuring Ranger Hive Audit">
+            <type>ranger-hive-audit</type>
+            <transfer operation="copy" from-type="ranger-hive-plugin-properties" from-key="XAAUDIT.DB.IS_ENABLED" to-key="xasecure.audit.destination.db" default-value="true"/>
+            <transfer operation="copy" from-type="ranger-hive-plugin-properties" from-key="XAAUDIT.HDFS.DESTINATION_DIRECTORY" to-key="xasecure.audit.destination.hdfs.dir" default-value="hdfs://NAMENODE_HOSTNAME:8020/ranger/audit"/>
+            <transfer operation="copy" from-type="ranger-hive-plugin-properties" from-key="XAAUDIT.HDFS.IS_ENABLED" to-key="xasecure.audit.destination.hdfs" default-value="true"/>
+            <transfer operation="copy" from-type="ranger-hive-plugin-properties" from-key="XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY" to-key="xasecure.audit.destination.hdfs.batch.filespool.dir" default-value="/var/log/hive/audit/hdfs/spool"/>
+            <transfer operation="copy" from-type="ranger-hive-plugin-properties" from-key="XAAUDIT.DB.USER_NAME" to-key="xasecure.audit.destination.db.user" default-value=""/>
+            <transfer operation="copy" from-type="ranger-hive-plugin-properties" from-key="XAAUDIT.DB.PASSWORD" to-key="xasecure.audit.destination.db.password" default-value="" mask="true"/>
+            <set key="xasecure.audit.credential.provider.file" value="jceks://file{{credential_file}}"/>
+            <set key="xasecure.audit.destination.solr" value="false"/>
+            <set key="xasecure.audit.destination.solr.urls" value="{{ranger_audit_solr_urls}}"/>
+            <set key="xasecure.audit.destination.solr.zookeepers" value="none"/>
+            <set key="xasecure.audit.destination.solr.batch.filespool.dir" value="/var/log/hive/audit/solr/spool"/>
+            <set key="xasecure.audit.destination.db.jdbc.driver" value="{{jdbc_driver}}"/>
+            <set key="xasecure.audit.destination.db.jdbc.url" value="{{audit_jdbc_url}}"/>
+            <set key="xasecure.audit.provider.summary.enabled" value="false"/>
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_3_0_0_hive_server_remove_deprecated_ranger_properties" summary="Removing Deprecated Ranger Hive Plugin Configurations">
+            <type>ranger-hive-plugin-properties</type>
+            <transfer operation="delete" delete-key="SSL_KEYSTORE_FILE_PATH"/>
+            <transfer operation="delete" delete-key="SSL_KEYSTORE_PASSWORD"/>
+            <transfer operation="delete" delete-key="SSL_TRUSTSTORE_FILE_PATH"/>
+            <transfer operation="delete" delete-key="SSL_TRUSTSTORE_PASSWORD"/>
+            <transfer operation="delete" delete-key="POLICY_MGR_URL"/>
+            <transfer operation="delete" delete-key="REPOSITORY_NAME"/>
+            <transfer operation="delete" delete-key="UPDATE_XAPOLICIES_ON_GRANT_REVOKE"/>
+            <transfer operation="delete" delete-key="XAAUDIT.DB.IS_ENABLED"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.DESTINATION_DIRECTORY"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.IS_ENABLED"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY"/>
+            <transfer operation="delete" delete-key="XAAUDIT.DB.USER_NAME"/>
+            <transfer operation="delete" delete-key="XAAUDIT.DB.PASSWORD"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.DESTINTATION_FILE"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.LOCAL_BUFFER_FILE"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS"/>
+            <transfer operation="delete" delete-key="SQL_CONNECTOR_JAR" />
+            <transfer operation="delete" delete-key="XAAUDIT.DB.FLAVOUR" />
+            <transfer operation="delete" delete-key="XAAUDIT.DB.DATABASE_NAME" />
+            <transfer operation="delete" delete-key="XAAUDIT.DB.HOSTNAME" />
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_3_0_0_hive_server_restore_transport_mode_on_downgrade">
+            <condition type="hive-site" key="hive.server2.transport.mode" value="binary">
+              <type>hive-site</type>
+              <key>hive.server2.thrift.port</key>
+              <value>10000</value>
+            </condition>
+            <condition type="hive-site" key="hive.server2.transport.mode" value="http">
+              <type>hive-site</type>
+              <key>hive.server2.http.port</key>
+              <value>10001</value>
+            </condition>
+          </definition>
+        </changes>
+      </component>
+
+      <component name="WEBHCAT_SERVER">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_3_0_0_webhcat_server_update_env">
+            <type>webhcat-env</type>
+            <replace key="content" find="export HADOOP_HOME={{hadoop_home}}" replace-with="export HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}" />
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_3_0_0_webhcat_server_update_configuration_paths" summary="Updating Configuration Paths">
+            <type>webhcat-site</type>
+            <replace key="templeton.jar" find="/usr/hdp/current/hive-webhcat" replace-with="/usr/hdp/${hdp.version}/hive"/>
+            <replace key="templeton.libjars" find="/usr/hdp/current/zookeeper-client" replace-with="/usr/hdp/${hdp.version}/zookeeper,/usr/hdp/${hdp.version}/hive/lib/hive-common.jar"/>
+            <replace key="templeton.hadoop" find="/usr/hdp/current/hadoop-client" replace-with="/usr/hdp/${hdp.version}/hadoop"/>
+            <replace key="templeton.hcat" find="/usr/hdp/current/hive-client" replace-with="/usr/hdp/${hdp.version}/hive"/>
+            <set key="templeton.hive.extra.files" value="/usr/hdp/${hdp.version}/tez/conf/tez-site.xml,/usr/hdp/${hdp.version}/tez,/usr/hdp/${hdp.version}/tez/lib"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
+
+    <service name="OOZIE">
+      <component name="OOZIE_SERVER">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_3_0_0_oozie_remove_redundant_configurations">
+            <summary>Updating oozie-site to remove redundant configurations</summary>
+            <type>oozie-site</type>
+            <transfer operation="delete" delete-key="*" preserve-edits="true">
+              <keep-key>oozie.base.url</keep-key>
+              <keep-key>oozie.services.ext</keep-key>
+              <keep-key>oozie.db.schema.name</keep-key>
+              <keep-key>oozie.service.JPAService.jdbc.username</keep-key>
+              <keep-key>oozie.service.JPAService.jdbc.password</keep-key>
+              <keep-key>oozie.service.JPAService.jdbc.driver</keep-key>
+              <keep-key>oozie.service.JPAService.jdbc.url</keep-key>
+              <keep-key>oozie.service.AuthorizationService.security.enabled</keep-key>
+              <keep-key>oozie.authentication.type</keep-key>
+              <keep-key>oozie.authentication.simple.anonymous.allowed</keep-key>
+              <keep-key>oozie.authentication.kerberos.name.rules</keep-key>
+              <keep-key>oozie.service.HadoopAccessorService.hadoop.configurations</keep-key>
+              <keep-key>oozie.service.HadoopAccessorService.kerberos.enabled</keep-key>
+              <keep-key>oozie.service.URIHandlerService.uri.handlers</keep-key>
+
+              <!-- required by Falcon and should be preserved -->
+              <keep-key>oozie.service.ELService.ext.functions.coord-job-submit-instances</keep-key>
+              <keep-key>oozie.service.ELService.ext.functions.coord-action-create-inst</keep-key>
+              <keep-key>oozie.service.ELService.ext.functions.coord-action-create</keep-key>
+              <keep-key>oozie.service.ELService.ext.functions.coord-job-submit-data</keep-key>
+              <keep-key>oozie.service.ELService.ext.functions.coord-action-start</keep-key>
+              <keep-key>oozie.service.ELService.ext.functions.coord-sla-submit</keep-key>
+              <keep-key>oozie.service.ELService.ext.functions.coord-sla-create</keep-key>
+            </transfer>
+            <set key="oozie.credentials.credentialclasses" value="hcat=org.apache.oozie.action.hadoop.HCatCredentials,hive2=org.apache.oozie.action.hadoop.Hive2Credentials" />
+          </definition>
+        </changes>
+      </component>
+    </service>
+
+    <service name="KNOX">
+      <component name="KNOX_GATEWAY">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_3_0_0_knox_configure_ranger_policy" summary="Configuring Ranger Knox Policy">
+            <type>ranger-knox-policymgr-ssl</type>
+            <transfer operation="copy" from-type="ranger-knox-plugin-properties" from-key="SSL_KEYSTORE_FILE_PATH" to-key="xasecure.policymgr.clientssl.keystore" default-value="/usr/hdp/current/knox-server/conf/ranger-plugin-keystore.jks"/>
+            <transfer operation="copy" from-type="ranger-knox-plugin-properties" from-key="SSL_KEYSTORE_PASSWORD" to-key="xasecure.policymgr.clientssl.keystore.password" default-value="myKeyFilePassword" mask="true"/>
+            <transfer operation="copy" from-type="ranger-knox-plugin-properties" from-key="SSL_TRUSTSTORE_FILE_PATH" to-key="xasecure.policymgr.clientssl.truststore" default-value="/usr/hdp/current/knox-server/conf/ranger-plugin-truststore.jks"/>
+            <transfer operation="copy" from-type="ranger-knox-plugin-properties" from-key="SSL_TRUSTSTORE_PASSWORD" to-key="xasecure.policymgr.clientssl.truststore.password" default-value="changeit" mask="true"/>
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_3_0_0_knox_configure_ranger_knox_audit" summary="Configuring Ranger Knox Audit">
+            <type>ranger-knox-audit</type>
+            <transfer operation="copy" from-type="ranger-knox-plugin-properties" from-key="XAAUDIT.DB.IS_ENABLED" to-key="xasecure.audit.destination.db" default-value="true"/>
+            <transfer operation="copy" from-type="ranger-knox-plugin-properties" from-key="XAAUDIT.HDFS.DESTINATION_DIRECTORY" to-key="xasecure.audit.destination.hdfs.dir" default-value="hdfs://NAMENODE_HOSTNAME:8020/ranger/audit"/>
+            <transfer operation="copy" from-type="ranger-knox-plugin-properties" from-key="XAAUDIT.HDFS.IS_ENABLED" to-key="xasecure.audit.destination.hdfs" default-value="true"/>
+            <transfer operation="copy" from-type="ranger-knox-plugin-properties" from-key="XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY" to-key="xasecure.audit.destination.hdfs.batch.filespool.dir" default-value="/var/log/knox/audit/hdfs/spool"/>
+            <transfer operation="copy" from-type="ranger-knox-plugin-properties" from-key="XAAUDIT.DB.USER_NAME" to-key="xasecure.audit.destination.db.user" default-value=""/>
+            <transfer operation="copy" from-type="ranger-knox-plugin-properties" from-key="XAAUDIT.DB.PASSWORD" to-key="xasecure.audit.destination.db.passwordr" default-value="" mask="true"/>
+            <set key="xasecure.audit.credential.provider.file" value="jceks://file{{credential_file}}"/>
+            <set key="xasecure.audit.destination.solr" value="false"/>
+            <set key="xasecure.audit.destination.solr.urls" value="{{ranger_audit_solr_urls}}"/>
+            <set key="xasecure.audit.destination.solr.zookeepers" value="none"/>
+            <set key="xasecure.audit.destination.solr.batch.filespool.dir" value="/var/log/knox/audit/solr/spool"/>
+            <set key="xasecure.audit.destination.db.jdbc.driver" value="{{jdbc_driver}}"/>
+            <set key="xasecure.audit.destination.db.jdbc.url" value="{{audit_jdbc_url}}"/>
+            <set key="xasecure.audit.provider.summary.enabled" value="false"/>
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_3_0_0_knox_remove_deprecated_ranger_properties" summary="Removing Deprecated Ranger Knox Plugin Configurations">
+            <type>ranger-knox-plugin-properties</type>
+            <transfer operation="delete" delete-key="SSL_KEYSTORE_FILE_PATH"/>
+            <transfer operation="delete" delete-key="SSL_KEYSTORE_PASSWORD"/>
+            <transfer operation="delete" delete-key="SSL_TRUSTSTORE_FILE_PATH"/>
+            <transfer operation="delete" delete-key="SSL_TRUSTSTORE_PASSWORD"/>
+            <transfer operation="delete" delete-key="XAAUDIT.DB.IS_ENABLED"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.DESTINATION_DIRECTORY"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.IS_ENABLED"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY"/>
+            <transfer operation="delete" delete-key="XAAUDIT.DB.USER_NAME"/>
+            <transfer operation="delete" delete-key="XAAUDIT.DB.PASSWORD"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.DESTINTATION_FILE"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.LOCAL_BUFFER_FILE"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS"/>
+            <transfer operation="delete" delete-key="SQL_CONNECTOR_JAR" />
+            <transfer operation="delete" delete-key="XAAUDIT.DB.FLAVOUR" />
+            <transfer operation="delete" delete-key="XAAUDIT.DB.DATABASE_NAME" />
+            <transfer operation="delete" delete-key="XAAUDIT.DB.HOSTNAME" />
+            <transfer operation="delete" delete-key="REPOSITORY_NAME" />
+            <transfer operation="delete" delete-key="POLICY_MGR_URL" />
+          </definition>
+        </changes>
+      </component>
+    </service>
+
+    <service name="STORM">
+      <component name="NIMBUS">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_3_0_0_nimbus_monitor_freq_adjustment">
+            <condition type="storm-site" key="nimbus.monitor.freq.secs" value="10">
+              <type>storm-site</type>
+              <key>nimbus.monitor.freq.secs</key>
+              <value>120</value>
+            </condition>
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_3_0_0_nimbus_convert_nimbus_host_to_seeds" summary="Converting nimbus.host into nimbus.seeds">
+            <type>storm-site</type>
+            <transfer operation="copy" from-key="nimbus.host" to-key="nimbus.seeds" coerce-to="yaml-array"/>
+            <transfer operation="delete" delete-key="nimbus.host"/>
+            <replace key="nimbus.authorizer" find="com.xasecure.authorization.storm.authorizer.XaSecureStormAuthorizer" replace-with="org.apache.ranger.authorization.storm.authorizer.RangerStormAuthorizer" />
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_3_0_0_nimbus_update_env_vars" summary="Updating Storm home and configuration environment variables">
+            <type>storm-env</type>
+            <replace key="content" find="# export STORM_CONF_DIR=&quot;&quot;" replace-with="export STORM_CONF_DIR={{conf_dir}}"/>
+            <replace key="content" find="export STORM_HOME=/usr/hdp/current/storm-client" replace-with="export STORM_HOME={{storm_component_home_dir}}"/>
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_3_0_0_nimbus_configure_ranger_policy" summary="Configuring Ranger Storm Policy">
+            <type>ranger-storm-policymgr-ssl</type>
+            <transfer operation="copy" from-type="ranger-storm-plugin-properties" from-key="SSL_KEYSTORE_FILE_PATH" to-key="xasecure.policymgr.clientssl.keystore" default-value="/usr/hdp/current/storm-client/conf/ranger-plugin-keystore.jks"/>
+            <transfer operation="copy" from-type="ranger-storm-plugin-properties" from-key="SSL_KEYSTORE_PASSWORD" to-key="xasecure.policymgr.clientssl.keystore.password" default-value="myKeyFilePassword" mask="true"/>
+            <transfer operation="copy" from-type="ranger-storm-plugin-properties" from-key="SSL_TRUSTSTORE_FILE_PATH" to-key="xasecure.policymgr.clientssl.truststore" default-value="/usr/hdp/current/storm-client/conf/ranger-plugin-truststore.jks"/>
+            <transfer operation="copy" from-type="ranger-storm-plugin-properties" from-key="SSL_TRUSTSTORE_PASSWORD" to-key="xasecure.policymgr.clientssl.truststore.password" default-value="changeit" mask="true"/>
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_3_0_0_nimbus_configure_ranger_audit" summary="Configuring Ranger Storm Audit">
+            <type>ranger-storm-audit</type>
+            <transfer operation="copy" from-type="ranger-storm-plugin-properties" from-key="XAAUDIT.DB.IS_ENABLED" to-key="xasecure.audit.destination.db" default-value="true" />
+            <transfer operation="copy" from-type="ranger-storm-plugin-properties" from-key="XAAUDIT.HDFS.DESTINATION_DIRECTORY" to-key="xasecure.audit.destination.hdfs.dir" default-value="hdfs://NAMENODE_HOSTNAME:8020/ranger/audit" />
+            <transfer operation="copy" from-type="ranger-storm-plugin-properties" from-key="XAAUDIT.HDFS.IS_ENABLED" to-key="xasecure.audit.destination.hdfs" default-value="true" />
+            <transfer operation="copy" from-type="ranger-storm-plugin-properties" from-key="XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY" to-key="xasecure.audit.destination.hdfs.batch.filespool.dir" default-value="/var/log/storm/audit/hdfs/spool" />
+            <transfer operation="copy" from-type="ranger-storm-plugin-properties" from-key="XAAUDIT.DB.USER_NAME" to-key="xasecure.audit.destination.db.user" default-value=""/>
+            <transfer operation="copy" from-type="ranger-storm-plugin-properties" from-key="XAAUDIT.DB.PASSWORD" to-key="xasecure.audit.destination.db.passwordr" default-value="" mask="true"/>
+            <set key="xasecure.audit.credential.provider.file" value="jceks://file{{credential_file}}"/>
+            <set key="xasecure.audit.destination.solr" value="false"/>
+            <set key="xasecure.audit.destination.solr.urls" value="{{ranger_audit_solr_urls}}"/>
+            <set key="xasecure.audit.destination.solr.zookeepers" value="none"/>
+            <set key="xasecure.audit.destination.solr.batch.filespool.dir" value="/var/log/storm/audit/solr/spool"/>
+            <set key="xasecure.audit.destination.db.jdbc.driver" value="{{jdbc_driver}}"/>
+            <set key="xasecure.audit.destination.db.jdbc.url" value="{{audit_jdbc_url}}"/>
+            <set key="xasecure.audit.provider.summary.enabled" value="false"/>
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_3_0_0_nimbus_remove_deprecated_ranger_properties" summary="Removing Deprecated Ranger Storm Plugin Configurations">
+            <type>ranger-storm-plugin-properties</type>
+            <transfer operation="delete" delete-key="SSL_KEYSTORE_FILE_PATH"/>
+            <transfer operation="delete" delete-key="SSL_KEYSTORE_PASSWORD"/>
+            <transfer operation="delete" delete-key="SSL_TRUSTSTORE_FILE_PATH"/>
+            <transfer operation="delete" delete-key="SSL_TRUSTSTORE_PASSWORD"/>
+            <transfer operation="delete" delete-key="XAAUDIT.DB.IS_ENABLED"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.DESTINATION_DIRECTORY"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.IS_ENABLED"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY"/>
+            <transfer operation="delete" delete-key="XAAUDIT.DB.USER_NAME"/>
+            <transfer operation="delete" delete-key="XAAUDIT.DB.PASSWORD"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.DESTINTATION_FILE"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.LOCAL_BUFFER_FILE"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS"/>
+            <transfer operation="delete" delete-key="XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS"/>
+            <transfer operation="delete" delete-key="SQL_CONNECTOR_JAR" />
+            <transfer operation="delete" delete-key="XAAUDIT.DB.FLAVOUR" />
+            <transfer operation="delete" delete-key="XAAUDIT.DB.DATABASE_NAME" />
+            <transfer operation="delete" delete-key="XAAUDIT.DB.HOSTNAME" />
+            <transfer operation="delete" delete-key="REPOSITORY_NAME" />
+            <transfer operation="delete" delete-key="POLICY_MGR_URL" />
+          </definition>
+        </changes>
+      </component>
+    </service>
+  </services>
+
+</upgrade-config-changes>
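
A side note on the nimbus.host to nimbus.seeds transfer above: the coerce-to="yaml-array" attribute is not self-describing. The following is only an illustrative sketch of the assumed coercion, not Ambari's actual transfer code: a single hostname copied from nimbus.host is wrapped into a YAML-style list literal before being written to nimbus.seeds.

// Illustrative sketch only; assumes coerce-to="yaml-array" wraps the copied value
// in a YAML list literal, e.g. "c6401.ambari.apache.org" -> ['c6401.ambari.apache.org'].
public class YamlArrayCoercionSketch {

  static String coerceToYamlArray(String value) {
    return "['" + value.trim() + "']";
  }

  public static void main(String[] args) {
    System.out.println(coerceToYamlArray("c6401.ambari.apache.org"));
  }
}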

http://git-wip-us.apache.org/repos/asf/ambari/blob/c58162fe/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
index 65ae2ed..4f57978 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
@@ -21,7 +21,20 @@
   <target>2.3.*.*</target>
   <skip-failures>false</skip-failures>
   <skip-service-check-failures>false</skip-service-check-failures>
-
+  <target-stack>HDP-2.3</target-stack>
+  <type>ROLLING</type>
+  <prerequisite-checks>
+    <check>org.apache.ambari.server.checks.HiveMultipleMetastoreCheck</check>
+    <check>org.apache.ambari.server.checks.MapReduce2JobHistoryStatePreservingCheck</check>
+    <check>org.apache.ambari.server.checks.SecondaryNamenodeDeletedCheck</check>
+    <check>org.apache.ambari.server.checks.ServicesMapReduceDistributedCacheCheck</check>
+    <check>org.apache.ambari.server.checks.ServicesNamenodeHighAvailabilityCheck</check>
+    <check>org.apache.ambari.server.checks.ServicesNamenodeTruncateCheck</check>
+    <check>org.apache.ambari.server.checks.ServicesTezDistributedCacheCheck</check>
+    <check>org.apache.ambari.server.checks.ServicesYarnWorkPreservingCheck</check>
+    <check>org.apache.ambari.server.checks.YarnRMHighAvailabilityCheck</check>
+    <check>org.apache.ambari.server.checks.YarnTimelineServerStatePreservingCheck</check>
+  </prerequisite-checks>
   <order>
     <group xsi:type="cluster" name="PRE_CLUSTER" title="Prepare Upgrade">
       <direction>UPGRADE</direction>
@@ -539,18 +552,7 @@
            <message>Please note that the HiveServer port will now change to 10010 if Hive is using a binary transport mode or 10011 if Hive is using an http transport mode. You can use "netstat -anp | grep 1001[01]" to determine if the port is available on each of the following HiveServer host(s): {{hosts.all}}. If the port is not available, the process using it must be terminated.</message>
           </task>
 
-          <task xsi:type="configure">
-            <condition type="hive-site" key="hive.server2.transport.mode" value="binary">
-              <type>hive-site</type>
-              <key>hive.server2.thrift.port</key>
-              <value>10010</value>
-            </condition>
-            <condition type="hive-site" key="hive.server2.transport.mode" value="http">
-              <type>hive-site</type>
-              <key>hive.server2.http.port</key>
-              <value>10011</value>
-            </condition>
-          </task>
+          <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_set_transport_mode"/>
         </pre-upgrade>
 
         <pre-downgrade>
@@ -559,18 +561,7 @@
            <message>Please note that the HiveServer port will now change to 10000 if Hive is using a binary transport mode or 10001 if Hive is using an http transport mode. You can use "netstat -anp | grep 1000[01]" to determine if the port is available on each of the following HiveServer host(s): {{hosts.all}}. If the port is not available, the process using it must be terminated.</message>
           </task>
 
-          <task xsi:type="configure">
-            <condition type="hive-site" key="hive.server2.transport.mode" value="binary">
-              <type>hive-site</type>
-              <key>hive.server2.thrift.port</key>
-              <value>10000</value>
-            </condition>
-            <condition type="hive-site" key="hive.server2.transport.mode" value="http">
-              <type>hive-site</type>
-              <key>hive.server2.http.port</key>
-              <value>10001</value>
-            </condition>
-          </task>
+          <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_restore_transport_mode_on_downgrade"/>
         </pre-downgrade>
 
         <upgrade>
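
The two inline configure tasks removed in the hunks above are now referenced by id from config-upgrade.xml, so their bodies no longer appear in this file. As a reading aid, here is a minimal sketch of the effect the upgrade-direction definition hdp_2_3_0_0_hive_server_set_transport_mode is assumed to have; it simply mirrors the <condition> blocks it replaces (the downgrade definition would restore 10000/10001 the same way). This is illustrative Java, not Ambari's ConfigureTask implementation.

import java.util.HashMap;
import java.util.Map;

// Illustrative sketch only -- assumes the referenced definition carries the same
// conditions as the inline task removed above: which port key is rewritten depends
// on the value of hive.server2.transport.mode.
public class HiveTransportModePortSketch {

  static void applyUpgradePorts(Map<String, String> hiveSite) {
    String mode = hiveSite.get("hive.server2.transport.mode");
    if ("binary".equals(mode)) {
      hiveSite.put("hive.server2.thrift.port", "10010");
    } else if ("http".equals(mode)) {
      hiveSite.put("hive.server2.http.port", "10011");
    }
  }

  public static void main(String[] args) {
    Map<String, String> hiveSite = new HashMap<String, String>();
    hiveSite.put("hive.server2.transport.mode", "binary");
    applyUpgradePorts(hiveSite);
    System.out.println(hiveSite.get("hive.server2.thrift.port")); // 10010
  }
}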

http://git-wip-us.apache.org/repos/asf/ambari/blob/c58162fe/ambari-server/src/test/java/org/apache/ambari/server/checks/ConfigurationMergeCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/ConfigurationMergeCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/ConfigurationMergeCheckTest.java
index 44bf164..85a2f02 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/ConfigurationMergeCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/ConfigurationMergeCheckTest.java
@@ -95,37 +95,7 @@ public class ConfigurationMergeCheckTest {
     replay(config);
     cmc.config = config;
 
-    Assert.assertFalse(cmc.isApplicable(request));
-
-    final RepositoryVersionDAO repositoryVersionDAO = EasyMock.createMock(RepositoryVersionDAO.class);
-    expect(repositoryVersionDAO.findByStackNameAndVersion("HDP", "1.0")).andReturn(createFor("1.0")).anyTimes();
-    expect(repositoryVersionDAO.findByStackNameAndVersion("HDP", "1.1")).andReturn(createFor("1.1")).anyTimes();
-    expect(repositoryVersionDAO.findByStackNameAndVersion("HDP", "1.2")).andReturn(null).anyTimes();
-
-    replay(repositoryVersionDAO);
-
-    cmc.repositoryVersionDaoProvider = new Provider<RepositoryVersionDAO>() {
-      @Override
-      public RepositoryVersionDAO get() {
-        return repositoryVersionDAO;
-      }
-    };
-
-    cmc.clustersProvider = new Provider<Clusters>() {
-      @Override
-      public Clusters get() {
-        return clusters;
-      }
-    };
-
-    request.setRepositoryVersion("1.0");
-    Assert.assertFalse(cmc.isApplicable(request));
-
-    request.setRepositoryVersion("1.1");
     Assert.assertTrue(cmc.isApplicable(request));
-
-    request.setRepositoryVersion("1.2");
-    Assert.assertFalse(cmc.isApplicable(request));
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/ambari/blob/c58162fe/ambari-server/src/test/java/org/apache/ambari/server/checks/HostsMasterMaintenanceCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/HostsMasterMaintenanceCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/HostsMasterMaintenanceCheckTest.java
index 961c28d..18a1d45 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/HostsMasterMaintenanceCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/HostsMasterMaintenanceCheckTest.java
@@ -34,6 +34,7 @@ import org.apache.ambari.server.state.stack.PrerequisiteCheck;
 import org.apache.ambari.server.state.stack.UpgradePack;
 import org.apache.ambari.server.state.stack.UpgradePack.ProcessingComponent;
 import org.apache.ambari.server.state.stack.upgrade.RepositoryVersionHelper;
+import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
 import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -60,15 +61,15 @@ public class HostsMasterMaintenanceCheckTest {
     Mockito.when(config.getRollingUpgradeMaxStack()).thenReturn("");
     hmmc.config = config;
     Assert.assertTrue(hmmc.isApplicable(request));
-
-    request.setRepositoryVersion(null);
+    Assert.assertTrue(new HostsMasterMaintenanceCheck().isApplicable(request));
     HostsMasterMaintenanceCheck hmmc2 = new HostsMasterMaintenanceCheck();
     hmmc2.config = config;
-    Assert.assertFalse(hmmc2.isApplicable(request));
+    Assert.assertTrue(hmmc2.isApplicable(request));
   }
 
   @Test
   public void testPerform() throws Exception {
+    final String upgradePackName = "upgrade_pack";
     final HostsMasterMaintenanceCheck hostsMasterMaintenanceCheck = new HostsMasterMaintenanceCheck();
     hostsMasterMaintenanceCheck.clustersProvider = new Provider<Clusters>() {
 
@@ -100,13 +101,13 @@ public class HostsMasterMaintenanceCheckTest {
     Mockito.when(cluster.getClusterId()).thenReturn(1L);
     Mockito.when(clusters.getCluster("cluster")).thenReturn(cluster);
     Mockito.when(cluster.getDesiredStackVersion()).thenReturn(new StackId("HDP", "1.0"));
-    Mockito.when(repositoryVersionHelper.getUpgradePackageName(Mockito.anyString(), Mockito.anyString(), Mockito.anyString())).thenReturn(null);
+    Mockito.when(repositoryVersionHelper.getUpgradePackageName(Mockito.anyString(), Mockito.anyString(), Mockito.anyString(), (UpgradeType) Mockito.anyObject())).thenReturn(null);
 
     PrerequisiteCheck check = new PrerequisiteCheck(null, null);
     hostsMasterMaintenanceCheck.perform(check, new PrereqCheckRequest("cluster"));
     Assert.assertEquals(PrereqCheckStatus.FAIL, check.getStatus());
 
-    Mockito.when(repositoryVersionHelper.getUpgradePackageName(Mockito.anyString(), Mockito.anyString(), Mockito.anyString())).thenReturn("upgrade pack");
+    Mockito.when(repositoryVersionHelper.getUpgradePackageName(Mockito.anyString(), Mockito.anyString(), Mockito.anyString(), (UpgradeType) Mockito.anyObject())).thenReturn(upgradePackName);
     Mockito.when(ambariMetaInfo.getUpgradePacks(Mockito.anyString(), Mockito.anyString())).thenReturn(new HashMap<String, UpgradePack>());
 
     check = new PrerequisiteCheck(null, null);
@@ -115,7 +116,8 @@ public class HostsMasterMaintenanceCheckTest {
 
     final Map<String, UpgradePack> upgradePacks = new HashMap<String, UpgradePack>();
     final UpgradePack upgradePack = Mockito.mock(UpgradePack.class);
-    upgradePacks.put("upgrade pack", upgradePack);
+    Mockito.when(upgradePack.getName()).thenReturn(upgradePackName);
+    upgradePacks.put(upgradePack.getName(), upgradePack);
     Mockito.when(ambariMetaInfo.getUpgradePacks(Mockito.anyString(), Mockito.anyString())).thenReturn(upgradePacks);
     Mockito.when(upgradePack.getTasks()).thenReturn(new HashMap<String, Map<String,ProcessingComponent>>());
     Mockito.when(cluster.getServices()).thenReturn(new HashMap<String, Service>());

http://git-wip-us.apache.org/repos/asf/ambari/blob/c58162fe/ambari-server/src/test/java/org/apache/ambari/server/checks/HostsRepositoryVersionCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/HostsRepositoryVersionCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/HostsRepositoryVersionCheckTest.java
index b54b633..0c2c92a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/HostsRepositoryVersionCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/HostsRepositoryVersionCheckTest.java
@@ -61,11 +61,10 @@ public class HostsRepositoryVersionCheckTest {
     Mockito.when(config.getRollingUpgradeMaxStack()).thenReturn("");
     hrvc.config = config;
     Assert.assertTrue(hrvc.isApplicable(request));
-
-    request.setRepositoryVersion(null);
+    Assert.assertTrue(new HostsRepositoryVersionCheck().isApplicable(request));
     HostsRepositoryVersionCheck hrvc2 = new HostsRepositoryVersionCheck();
     hrvc2.config = config;
-    Assert.assertFalse(hrvc2.isApplicable(request));
+    Assert.assertTrue(hrvc2.isApplicable(request));
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/ambari/blob/c58162fe/ambari-server/src/test/java/org/apache/ambari/server/checks/SecondaryNamenodeDeletedCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/SecondaryNamenodeDeletedCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/SecondaryNamenodeDeletedCheckTest.java
index 5d32f4d..80740b3 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/SecondaryNamenodeDeletedCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/SecondaryNamenodeDeletedCheckTest.java
@@ -18,6 +18,8 @@
 package org.apache.ambari.server.checks;
 
 import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
 
 import org.apache.ambari.server.ServiceNotFoundException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
@@ -74,25 +76,25 @@ public class SecondaryNamenodeDeletedCheckTest {
   @Test
   public void testIsApplicable() throws Exception {
     final Cluster cluster = Mockito.mock(Cluster.class);
+    final Map<String, Service> services = new HashMap<>();
+    final Service service = Mockito.mock(Service.class);
+
+    services.put("HDFS", service);
+
     Mockito.when(cluster.getClusterId()).thenReturn(1L);
+    Mockito.when(cluster.getServices()).thenReturn(services);
     Mockito.when(clusters.getCluster("cluster")).thenReturn(cluster);
 
-    final Service service = Mockito.mock(Service.class);
-    Mockito.when(cluster.getService("HDFS")).thenReturn(service);
     Assert.assertTrue(secondaryNamenodeDeletedCheck.isApplicable(new PrereqCheckRequest("cluster")));
 
     PrereqCheckRequest req = new PrereqCheckRequest("cluster");
     req.addResult(CheckDescription.SERVICES_NAMENODE_HA, PrereqCheckStatus.FAIL);
-    Mockito.when(cluster.getService("HDFS")).thenReturn(service);
     Assert.assertFalse(secondaryNamenodeDeletedCheck.isApplicable(req));
 
     req.addResult(CheckDescription.SERVICES_NAMENODE_HA, PrereqCheckStatus.PASS);
-    Mockito.when(cluster.getService("HDFS")).thenReturn(service);
     Assert.assertTrue(secondaryNamenodeDeletedCheck.isApplicable(req));
 
-
-
-    Mockito.when(cluster.getService("HDFS")).thenThrow(new ServiceNotFoundException("no", "service"));
+    services.remove("HDFS");
     Assert.assertFalse(secondaryNamenodeDeletedCheck.isApplicable(new PrereqCheckRequest("cluster")));
   }
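
This test and the remaining check tests below all change their mocking in the same way: instead of stubbing cluster.getService("HDFS") and having it throw ServiceNotFoundException for the negative case, they stub cluster.getServices() and add or remove the map entry. A minimal sketch of the lookup style that mocking implies (illustrative only, not the production check code):

import java.util.HashMap;
import java.util.Map;

// Illustrative sketch only. The updated tests flip applicability by removing the
// "HDFS"/"YARN"/"TEZ" entry from the mocked services map, which suggests a plain
// containment check instead of an exception-driven getService() lookup.
public class ServicesMapApplicabilitySketch {

  static boolean hasService(Map<String, ?> services, String serviceName) {
    return services.containsKey(serviceName);
  }

  public static void main(String[] args) {
    Map<String, Object> services = new HashMap<String, Object>();
    services.put("HDFS", new Object());
    System.out.println(hasService(services, "HDFS")); // true
    services.remove("HDFS");
    System.out.println(hasService(services, "HDFS")); // false
  }
}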
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/c58162fe/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesMapReduceDistributedCacheCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesMapReduceDistributedCacheCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesMapReduceDistributedCacheCheckTest.java
index fea82f3..a7c6d58 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesMapReduceDistributedCacheCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesMapReduceDistributedCacheCheckTest.java
@@ -64,24 +64,26 @@ public class ServicesMapReduceDistributedCacheCheckTest {
   @Test
   public void testIsApplicable() throws Exception {
     final Cluster cluster = Mockito.mock(Cluster.class);
-    Mockito.when(cluster.getClusterId()).thenReturn(1L);
+    final Map<String, Service> services = new HashMap<>();
+    final Service service = Mockito.mock(Service.class);
+
+    services.put("YARN", service);
+
+    Mockito.when(cluster.getServices()).thenReturn(services);
     Mockito.when(clusters.getCluster("cluster")).thenReturn(cluster);
+    Mockito.when(cluster.getClusterId()).thenReturn(1L);
 
-    final Service service = Mockito.mock(Service.class);
-    Mockito.when(cluster.getService("YARN")).thenReturn(service);
     Assert.assertTrue(servicesMapReduceDistributedCacheCheck.isApplicable(new PrereqCheckRequest("cluster")));
 
     PrereqCheckRequest req = new PrereqCheckRequest("cluster");
     req.addResult(CheckDescription.SERVICES_NAMENODE_HA, PrereqCheckStatus.FAIL);
-    Mockito.when(cluster.getService("YARN")).thenReturn(service);
     Assert.assertFalse(servicesMapReduceDistributedCacheCheck.isApplicable(req));
 
     req.addResult(CheckDescription.SERVICES_NAMENODE_HA, PrereqCheckStatus.PASS);
-    Mockito.when(cluster.getService("YARN")).thenReturn(service);
     Assert.assertTrue(servicesMapReduceDistributedCacheCheck.isApplicable(req));
 
 
-    Mockito.when(cluster.getService("YARN")).thenThrow(new ServiceNotFoundException("no", "service"));
+    services.remove("YARN");
     Assert.assertFalse(servicesMapReduceDistributedCacheCheck.isApplicable(new PrereqCheckRequest("cluster")));
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/c58162fe/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesNamenodeHighAvailabilityCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesNamenodeHighAvailabilityCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesNamenodeHighAvailabilityCheckTest.java
index 947121a..5713f59 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesNamenodeHighAvailabilityCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesNamenodeHighAvailabilityCheckTest.java
@@ -65,14 +65,18 @@ public class ServicesNamenodeHighAvailabilityCheckTest {
   @Test
   public void testIsApplicable() throws Exception {
     final Cluster cluster = Mockito.mock(Cluster.class);
+    final Map<String, Service> services = new HashMap<>();
+    final Service service = Mockito.mock(Service.class);
+
+    services.put("HDFS", service);
+
+    Mockito.when(cluster.getServices()).thenReturn(services);
     Mockito.when(cluster.getClusterId()).thenReturn(1L);
     Mockito.when(clusters.getCluster("cluster")).thenReturn(cluster);
 
-    final Service service = Mockito.mock(Service.class);
-    Mockito.when(cluster.getService("HDFS")).thenReturn(service);
     Assert.assertTrue(servicesNamenodeHighAvailabilityCheck.isApplicable(new PrereqCheckRequest("cluster")));
 
-    Mockito.when(cluster.getService("HDFS")).thenThrow(new ServiceNotFoundException("no", "service"));
+    services.remove("HDFS");
     Assert.assertFalse(servicesNamenodeHighAvailabilityCheck.isApplicable(new PrereqCheckRequest("cluster")));
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/c58162fe/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesNamenodeTruncateCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesNamenodeTruncateCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesNamenodeTruncateCheckTest.java
index 07d17d8..ef39e9e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesNamenodeTruncateCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesNamenodeTruncateCheckTest.java
@@ -40,6 +40,7 @@ import org.junit.Before;
 import org.junit.Test;
 
 import com.google.inject.Provider;
+import org.mockito.Mockito;
 
 /**
  * Unit tests for ServicesUpCheck
@@ -56,9 +57,14 @@ public class ServicesNamenodeTruncateCheckTest {
     Cluster cluster = EasyMock.createMock(Cluster.class);
 
     Config config = EasyMock.createMock(Config.class);
+    final Map<String, Service> services = new HashMap<>();
+    final Service service = Mockito.mock(Service.class);
 
+    services.put("HDFS", service);
+
+    expect(cluster.getServices()).andReturn(services).anyTimes();
     expect(config.getProperties()).andReturn(m_configMap).anyTimes();
-    expect(cluster.getService("HDFS")).andReturn(EasyMock.createMock(Service.class));
+    expect(cluster.getService("HDFS")).andReturn(service);
     expect(cluster.getDesiredConfigByType("hdfs-site")).andReturn(config).anyTimes();
     expect(m_clusters.getCluster((String) anyObject())).andReturn(cluster).anyTimes();
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/c58162fe/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesTezDistributedCacheCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesTezDistributedCacheCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesTezDistributedCacheCheckTest.java
index d732302..d70d575 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesTezDistributedCacheCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesTezDistributedCacheCheckTest.java
@@ -65,27 +65,28 @@ public class ServicesTezDistributedCacheCheckTest {
   @Test
   public void testIsApplicable() throws Exception {
     final Cluster cluster = Mockito.mock(Cluster.class);
+    final Map<String, Service> services = new HashMap<>();
+    final Service service = Mockito.mock(Service.class);
+
+    services.put("TEZ", service);
+
+    Mockito.when(cluster.getServices()).thenReturn(services);
     Mockito.when(cluster.getClusterId()).thenReturn(1L);
     Mockito.when(clusters.getCluster("cluster")).thenReturn(cluster);
 
-    final Service service = Mockito.mock(Service.class);
-    Mockito.when(cluster.getService("TEZ")).thenReturn(service);
+
     Assert.assertTrue(servicesTezDistributedCacheCheck.isApplicable(new PrereqCheckRequest("cluster")));
 
     PrereqCheckRequest req = new PrereqCheckRequest("cluster");
     req.addResult(CheckDescription.SERVICES_NAMENODE_HA, PrereqCheckStatus.FAIL);
-    Mockito.when(cluster.getService("TEZ")).thenReturn(service);
     Assert.assertFalse(servicesTezDistributedCacheCheck.isApplicable(req));
 
     req.addResult(CheckDescription.SERVICES_NAMENODE_HA, PrereqCheckStatus.PASS);
-    Mockito.when(cluster.getService("TEZ")).thenReturn(service);
     Assert.assertTrue(servicesTezDistributedCacheCheck.isApplicable(req));
 
 
-    Mockito.when(cluster.getService("TEZ")).thenThrow(new ServiceNotFoundException("no", "service"));
+    services.remove("TEZ");
     Assert.assertFalse(servicesTezDistributedCacheCheck.isApplicable(new PrereqCheckRequest("cluster")));
-
-
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/ambari/blob/c58162fe/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesYarnWorkPreservingCheckTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesYarnWorkPreservingCheckTest.java b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesYarnWorkPreservingCheckTest.java
index 135c9c9..5658f17 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesYarnWorkPreservingCheckTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/checks/ServicesYarnWorkPreservingCheckTest.java
@@ -65,14 +65,18 @@ public class ServicesYarnWorkPreservingCheckTest {
   @Test
   public void testIsApplicable() throws Exception {
     final Cluster cluster = Mockito.mock(Cluster.class);
+    final Map<String, Service> services = new HashMap<>();
+    final Service service = Mockito.mock(Service.class);
+
+    services.put("YARN", service);
+
+    Mockito.when(cluster.getServices()).thenReturn(services);
     Mockito.when(cluster.getClusterId()).thenReturn(1L);
     Mockito.when(clusters.getCluster("cluster")).thenReturn(cluster);
 
-    final Service service = Mockito.mock(Service.class);
-    Mockito.when(cluster.getService("YARN")).thenReturn(service);
     Assert.assertTrue(servicesYarnWorkPreservingCheck.isApplicable(new PrereqCheckRequest("cluster")));
 
-    Mockito.when(cluster.getService("YARN")).thenThrow(new ServiceNotFoundException("no", "service"));
+    services.remove("YARN");
     Assert.assertFalse(servicesYarnWorkPreservingCheck.isApplicable(new PrereqCheckRequest("cluster")));
   }