Posted to commits@ambari.apache.org by jo...@apache.org on 2015/12/11 21:26:39 UTC

ambari git commit: AMBARI-14343 - HiveServer Upgrade Fails Because Of Missing Kerberos Properties (jonathanhurley)

Repository: ambari
Updated Branches:
  refs/heads/trunk 56c12489c -> 9535a51eb


AMBARI-14343 - HiveServer Upgrade Fails Because Of Missing Kerberos Properties (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9535a51e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9535a51e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9535a51e

Branch: refs/heads/trunk
Commit: 9535a51eb31ffcaf9e9a8616c77b7fe056e3179f
Parents: 56c1248
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Fri Dec 11 11:52:48 2015 -0500
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Fri Dec 11 15:26:32 2015 -0500

----------------------------------------------------------------------
 .../upgrades/HiveKerberosConfigAction.java      | 115 +++++++++++++
 .../upgrades/YarnConfigCalculation.java         |  37 +----
 .../upgrades/ZooKeeperQuorumCalculator.java     |  75 +++++++++
 .../HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml |  13 +-
 .../stacks/HDP/2.2/upgrades/config-upgrade.xml  |   4 +-
 .../HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml |   5 +-
 .../stacks/HDP/2.2/upgrades/upgrade-2.3.xml     |   7 +-
 .../stacks/HDP/2.3/upgrades/config-upgrade.xml  |   5 +
 .../upgrades/HiveKerberosConfigActionTest.java  | 166 +++++++++++++++++++
 9 files changed, 383 insertions(+), 44 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9535a51e/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveKerberosConfigAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveKerberosConfigAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveKerberosConfigAction.java
new file mode 100644
index 0000000..57b5135
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveKerberosConfigAction.java
@@ -0,0 +1,115 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.serveraction.upgrades;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.actionmanager.HostRoleStatus;
+import org.apache.ambari.server.agent.CommandReport;
+import org.apache.ambari.server.serveraction.AbstractServerAction;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Config;
+
+import com.google.inject.Inject;
+
+/**
+ * The {@link HiveKerberosConfigAction} is used to ensure that the following
+ * settings are correctly set when upgrading a Kerberized Hive Server:
+ * <ul>
+ * <li>hive.zookeeper.quorum</li>
+ * <li>hive.cluster.delegation.token.store.zookeeper.connectString</li>
+ * </ul>
+ *
+ * This is typically only needed when upgrading from a version which does not
+ * have these properties. The upgrade merge logic can't perform complex
+ * calculations, such as building the ZooKeeper quorum string.
+ * <p/>
+ * The above properties will only be set if {@code cluster-env/security_enabled}
+ * is {@code true}.
+ */
+public class HiveKerberosConfigAction extends AbstractServerAction {
+  protected static final String HIVE_SITE_CONFIG_TYPE = "hive-site";
+  protected static final String CLUSTER_ENV_CONFIG_TYPE = "cluster-env";
+
+  protected static final String CLUSTER_ENV_SECURITY_ENABLED = "security_enabled";
+  protected static final String HIVE_SITE_ZK_QUORUM = "hive.zookeeper.quorum";
+  protected static final String HIVE_SITE_ZK_CONNECT_STRING = "hive.cluster.delegation.token.store.zookeeper.connectString";
+
+  /**
+   * Used for retrieving the cluster (and eventually the desired configuration).
+   */
+  @Inject
+  private Clusters m_clusters;
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext)
+      throws AmbariException, InterruptedException {
+
+    String clusterName = getExecutionCommand().getClusterName();
+    Cluster cluster = m_clusters.getCluster(clusterName);
+
+    Config clusterEnv = cluster.getDesiredConfigByType(CLUSTER_ENV_CONFIG_TYPE);
+
+    if (null == clusterEnv) {
+      return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
+          String.format(
+              "The %s configuration type was not found; unable to determine whether Hive is Kerberized",
+              CLUSTER_ENV_CONFIG_TYPE),
+          "");
+    }
+
+    // gets the security_enabled property; if it doesn't exist or is blank,
+    // Boolean will be false (no need for extra null check)
+    Map<String, String> clusterEnvProperties = clusterEnv.getProperties();
+    boolean securityEnabled = Boolean.parseBoolean(clusterEnvProperties.get(CLUSTER_ENV_SECURITY_ENABLED));
+
+    if (!securityEnabled) {
+      return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
+          "Hive is not Kerberized, skipping Kerberos-specific configuration properties", "");
+    }
+
+    Config hiveSite = cluster.getDesiredConfigByType(HIVE_SITE_CONFIG_TYPE);
+    if (hiveSite == null) {
+      return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
+          String.format(
+              "The %s configuration type was not found; unable to set Hive configuration properties",
+              HIVE_SITE_CONFIG_TYPE),
+          "");
+    }
+
+    String zookeeperQuorum = ZooKeeperQuorumCalculator.getZooKeeperQuorumString(cluster);
+
+    Map<String, String> hiveSiteProperties = hiveSite.getProperties();
+    hiveSiteProperties.put(HIVE_SITE_ZK_QUORUM, zookeeperQuorum);
+    hiveSiteProperties.put(HIVE_SITE_ZK_CONNECT_STRING, zookeeperQuorum);
+
+    hiveSite.setProperties(hiveSiteProperties);
+    hiveSite.persist(false);
+
+    return createCommandReport(0, HostRoleStatus.COMPLETED, "{}",
+        String.format("Successfully set %s and %s in %s", HIVE_SITE_ZK_QUORUM,
+            HIVE_SITE_ZK_CONNECT_STRING, HIVE_SITE_CONFIG_TYPE),
+        "");
+  }
+}
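
For reference, a minimal sketch (not part of this commit) of the hive-site values the
action is expected to produce on a Kerberized cluster. The host names and the 2181
client port are illustrative, borrowed from the test fixture further below:

import java.util.HashMap;
import java.util.Map;

public class HiveKerberosConfigSketch {
  public static void main(String[] args) {
    // Quorum string as built by ZooKeeperQuorumCalculator: FQDN:port pairs joined on commas.
    String quorum = "c6401.ambari.apache.org:2181,c6402.ambari.apache.org:2181";

    // Both Kerberos-related properties receive the same quorum value.
    Map<String, String> hiveSite = new HashMap<>();
    hiveSite.put("hive.zookeeper.quorum", quorum);
    hiveSite.put("hive.cluster.delegation.token.store.zookeeper.connectString", quorum);

    System.out.println(hiveSite);
  }
}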

http://git-wip-us.apache.org/repos/asf/ambari/blob/9535a51e/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/YarnConfigCalculation.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/YarnConfigCalculation.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/YarnConfigCalculation.java
index 5953553..feefcaf 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/YarnConfigCalculation.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/YarnConfigCalculation.java
@@ -18,24 +18,18 @@
 
 package org.apache.ambari.server.serveraction.upgrades;
 
-import com.google.inject.Inject;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+
 import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.Role;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.serveraction.AbstractServerAction;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.ServiceComponentHost;
-import org.apache.commons.lang.StringUtils;
 
-import java.math.BigDecimal;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.List;
-import java.util.concurrent.ConcurrentMap;
+import com.google.inject.Inject;
 
 /**
  * Computes Yarn properties.  This class is only used when moving from
@@ -43,11 +37,9 @@ import java.util.concurrent.ConcurrentMap;
  */
 public class YarnConfigCalculation extends AbstractServerAction {
   private static final String YARN_SITE_CONFIG_TYPE = "yarn-site";
-  private static final String ZOO_CFG_CONFIG_TYPE = "zoo.cfg";
+
   private static final String YARN_RM_ZK_ADDRESS_PROPERTY_NAME = "yarn.resourcemanager.zk-address";
   private static final String HADOOP_REGISTRY_ZK_QUORUM_PROPERTY_NAME = "hadoop.registry.zk.quorum";
-  private static final String ZOOKEEPER_CLIENT_PORT_PROPERTY_NAME = "clientPort";
-  private static final String DEFAULT_ZK_CLIENT_PORT = "2181";
 
   @Inject
   private Clusters clusters;
@@ -71,24 +63,7 @@ public class YarnConfigCalculation extends AbstractServerAction {
     String oldRmZkAddress = yarnSiteProperties.get(YARN_RM_ZK_ADDRESS_PROPERTY_NAME);
     String oldHadoopRegistryZKQuorum = yarnSiteProperties.get(HADOOP_REGISTRY_ZK_QUORUM_PROPERTY_NAME);
 
-    String zkClientPort = DEFAULT_ZK_CLIENT_PORT;
-    Config zooConfig = cluster.getDesiredConfigByType(ZOO_CFG_CONFIG_TYPE);
-    if(zooConfig != null) {
-      Map<String, String> zooProperties = zooConfig.getProperties();
-      if(zooProperties.containsKey(ZOOKEEPER_CLIENT_PORT_PROPERTY_NAME)) {
-        zkClientPort = zooProperties.get(ZOOKEEPER_CLIENT_PORT_PROPERTY_NAME);
-      }
-    }
-
-    List<ServiceComponentHost> zkServers = cluster.getServiceComponentHosts(
-        Service.Type.ZOOKEEPER.name(), Role.ZOOKEEPER_SERVER.name());
-
-    List<String> zkAddresses = new ArrayList<>();
-    for(ServiceComponentHost zkServer : zkServers) {
-      String zkAddress = zkServer.getHostName() + ":" + zkClientPort;
-      zkAddresses.add(zkAddress);
-    }
-    String zkServersStr = StringUtils.join(zkAddresses, ",");
+    String zkServersStr = ZooKeeperQuorumCalculator.getZooKeeperQuorumString(cluster);
     yarnSiteProperties.put(YARN_RM_ZK_ADDRESS_PROPERTY_NAME, zkServersStr);
     yarnSiteProperties.put(HADOOP_REGISTRY_ZK_QUORUM_PROPERTY_NAME, zkServersStr);
     yarnSiteConfig.setProperties(yarnSiteProperties);

http://git-wip-us.apache.org/repos/asf/ambari/blob/9535a51e/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ZooKeeperQuorumCalculator.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ZooKeeperQuorumCalculator.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ZooKeeperQuorumCalculator.java
new file mode 100644
index 0000000..fa3683a
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ZooKeeperQuorumCalculator.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.serveraction.upgrades;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.ambari.server.Role;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.commons.lang.StringUtils;
+
+/**
+ * The {@link ZooKeeperQuorumCalculator} is a central location which can be used
+ * to construct a comma-separated string of known ZooKeeper hosts. The returned
+ * string contains the FQDN of each known ZK host along with a port value for each.
+ */
+public class ZooKeeperQuorumCalculator {
+  private static final String ZOO_CFG_CONFIG_TYPE = "zoo.cfg";
+  private static final String ZOOKEEPER_CLIENT_PORT_PROPERTY_NAME = "clientPort";
+  private static final String DEFAULT_ZK_CLIENT_PORT = "2181";
+
+  /**
+   * Gets a comma-separated list of the ZK servers along with their ports.
+   *
+   * @param cluster
+   *          the cluster (not {@code null}).
+   * @return the list of FQDN ZooKeeper hosts, or an empty string (never
+   *         {@code null}).
+   */
+  static String getZooKeeperQuorumString(Cluster cluster) {
+
+    // attempt to calculate the port
+    String zkClientPort = DEFAULT_ZK_CLIENT_PORT;
+    Config zooConfig = cluster.getDesiredConfigByType(ZOO_CFG_CONFIG_TYPE);
+    if (zooConfig != null) {
+      Map<String, String> zooProperties = zooConfig.getProperties();
+      if (zooProperties.containsKey(ZOOKEEPER_CLIENT_PORT_PROPERTY_NAME)) {
+        zkClientPort = zooProperties.get(ZOOKEEPER_CLIENT_PORT_PROPERTY_NAME);
+      }
+    }
+
+    // get all known ZK hosts
+    List<ServiceComponentHost> zkServers = cluster.getServiceComponentHosts(
+        Service.Type.ZOOKEEPER.name(), Role.ZOOKEEPER_SERVER.name());
+
+    List<String> zkAddresses = new ArrayList<>();
+    for (ServiceComponentHost zkServer : zkServers) {
+      String zkAddress = zkServer.getHostName() + ":" + zkClientPort;
+      zkAddresses.add(zkAddress);
+    }
+
+    // join on comma without any spaces
+    String zkServersStr = StringUtils.join(zkAddresses, ",");
+    return zkServersStr;
+  }
+}
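
A standalone sketch, assuming a two-node ZooKeeper ensemble and the default 2181
client port, of the quorum format the calculator returns; the host names are
hypothetical:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class ZkQuorumFormatSketch {
  public static void main(String[] args) {
    // The calculator falls back to this default when zoo.cfg defines no clientPort.
    String clientPort = "2181";

    List<String> addresses = new ArrayList<>();
    for (String host : Arrays.asList("c6401.ambari.apache.org", "c6402.ambari.apache.org")) {
      addresses.add(host + ":" + clientPort);
    }

    // Joined on commas with no spaces, matching the calculator's output.
    String quorum = String.join(",", addresses);
    System.out.println(quorum); // c6401.ambari.apache.org:2181,c6402.ambari.apache.org:2181
  }
}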

http://git-wip-us.apache.org/repos/asf/ambari/blob/9535a51e/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml
index 015284d..02cc107 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml
@@ -226,14 +226,12 @@
         <task xsi:type="configure" id="hdp_2_2_0_0_historyserver_classpath"/>
       </execute-stage>
 
-
       <!--Changes for stack 2.3-->
       <!--HDFS-->
       <execute-stage service="HDFS" component="NAMENODE" title="Apply config changes for NameNode">
         <task xsi:type="configure" id="hdp_2_3_0_0_modify_hadoop_env"/>
       </execute-stage>
 
-
       <!--YARN-->
       <execute-stage service="MAPREDUCE2" component="HISTORYSERVER" title="Apply config changes for HistoryServer">
         <task xsi:type="configure" id="hdp_2_3_0_0_mapreduce2_adjust_history_server"/>
@@ -280,7 +278,6 @@
         <task xsi:type="configure" id="hdp_2_3_0_0_hbase_master_adjust_authorization_coprocessors"/>
       </execute-stage>
 
-
       <!--TEZ-->
       <execute-stage service="TEZ" component="TEZ_CLIENT" title="Apply config changes for Tez">
         <task xsi:type="configure" id="hdp_2_3_0_0_tez_client_adjust_properties"/>
@@ -296,7 +293,15 @@
 
       <!--HIVE-->
       <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server">
-        <task xsi:type="configure" id="hdp_2_2_0_0_hive_server_keberized_token_store_class"/>
+        <task xsi:type="server_action" summary="Calculating Kerberos Properties for Hive" class="org.apache.ambari.server.serveraction.upgrades.HiveKerberosConfigAction" />
+      </execute-stage>
+
+      <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server">
+        <task xsi:type="configure" id="hdp_2_2_0_0_hive_server_kerberized_token_store_class"/>
+      </execute-stage>
+
+      <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server">
+        <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_remove_datastore_classname"/>
       </execute-stage>
 
       <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server">

http://git-wip-us.apache.org/repos/asf/ambari/blob/9535a51e/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/config-upgrade.xml
index 1eab4ca..b134670 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/config-upgrade.xml
@@ -70,11 +70,11 @@
             </condition>
           </definition>
 
-          <definition xsi:type="configure" id="hdp_2_2_0_0_hive_server_keberized_token_store_class">
+          <definition xsi:type="configure" id="hdp_2_2_0_0_hive_server_kerberized_token_store_class">
             <condition type="cluster-env" key="security_enabled" value="true">
               <type>hive-site</type>
               <key>hive.cluster.delegation.token.store.class</key>
-              <value>org.apache.hadoop.hive.thrift.DBTokenStore</value>
+              <value>org.apache.hadoop.hive.thrift.ZooKeeperTokenStore</value>
             </condition>
           </definition>
         </changes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/9535a51e/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml
index d27b217..5d0a968 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.3.xml
@@ -444,6 +444,10 @@
         <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_remove_deprecated_ranger_properties"/>
       </execute-stage>
 
+      <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server">
+        <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_remove_datastore_classname"/>
+      </execute-stage>
+
       <execute-stage service="HIVE" component="WEBHCAT_SERVER" title="Apply config changes for WebHCat Server">
         <task xsi:type="configure" id="hdp_2_3_0_0_webhcat_server_update_env"/>
       </execute-stage>
@@ -452,7 +456,6 @@
         <task xsi:type="configure" id="hdp_2_3_0_0_webhcat_server_update_configuration_paths"/>
       </execute-stage>
 
-
       <!--OOZIE-->
       <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Apply config changes for Oozie Server">
         <task xsi:type="configure" id="hdp_2_3_0_0_oozie_remove_redundant_configurations"/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/9535a51e/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml
index 289e790..769688c 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml
@@ -666,18 +666,13 @@
           </task>
 
           <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_set_transport_mode"/>
-          
           <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_replace_auth_manager"/>
-
           <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_configure_authentication"/>
-
           <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_configure_ranger_policy"/>
-
           <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_configure_ranger_security"/>
-
           <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_configure_ranger_audit"/>
-
           <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_remove_deprecated_ranger_properties"/>
+          <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_remove_datastore_classname"/>
         </pre-upgrade>
 
         <pre-downgrade>

http://git-wip-us.apache.org/repos/asf/ambari/blob/9535a51e/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
index 51c6029..bdb4808 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/config-upgrade.xml
@@ -660,6 +660,11 @@
               <value>10001</value>
             </condition>
           </definition>
+
+          <definition xsi:type="configure" id="hdp_2_3_0_0_hive_server_remove_datastore_classname">
+            <type>hive-site</type>
+            <transfer operation="delete" delete-key="datanucleus.rdbms.datastoreAdapterClassName"/>
+          </definition>
         </changes>
       </component>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/9535a51e/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveKerberosConfigActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveKerberosConfigActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveKerberosConfigActionTest.java
new file mode 100644
index 0000000..c050ec4
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveKerberosConfigActionTest.java
@@ -0,0 +1,166 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.serveraction.upgrades;
+
+import java.lang.reflect.Field;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.ambari.server.agent.ExecutionCommand;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Config;
+import org.easymock.EasyMock;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.powermock.api.mockito.PowerMockito;
+import org.powermock.core.classloader.annotations.PrepareForTest;
+import org.powermock.modules.junit4.PowerMockRunner;
+
+/**
+ * Tests {@link HiveKerberosConfigAction} to ensure that the correct properties
+ * are set.
+ */
+@RunWith(PowerMockRunner.class)
+@PrepareForTest(ZooKeeperQuorumCalculator.class)
+public class HiveKerberosConfigActionTest {
+
+  private static final String CLUSTER_NAME = "c1";
+  private HiveKerberosConfigAction m_action = null;
+
+  private Clusters m_clusters = EasyMock.createStrictMock(Clusters.class);
+  private Cluster m_cluster = EasyMock.createStrictMock(Cluster.class);
+  private Config m_clusterEnvConfig = EasyMock.createStrictMock(Config.class);
+  private Config m_hiveSiteConfig = EasyMock.createStrictMock(Config.class);
+  private ExecutionCommand m_executionCommand = EasyMock.createNiceMock(ExecutionCommand.class);
+
+  /**
+   * Sets up some generic mocks before the test.
+   *
+   * @throws Exception
+   */
+  @Before
+  public void before() throws Exception {
+    m_action = new HiveKerberosConfigAction();
+
+    // setup clusters->cluster mock
+    EasyMock.expect(m_executionCommand.getClusterName()).andReturn(CLUSTER_NAME).atLeastOnce();
+    EasyMock.expect(m_clusters.getCluster(CLUSTER_NAME)).andReturn(m_cluster).atLeastOnce();
+
+    // set the mock objects on the class under test
+    Field m_clusterField = HiveKerberosConfigAction.class.getDeclaredField("m_clusters");
+    m_clusterField.setAccessible(true);
+    m_clusterField.set(m_action, m_clusters);
+    m_action.setExecutionCommand(m_executionCommand);
+
+  }
+
+  /**
+   * Tests that nothing is set if Kerberos is not enabled.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testKerberosNotEnabled() throws Exception {
+    Map<String, String> clusterEnvProperties = new HashMap<>();
+    clusterEnvProperties.put(HiveKerberosConfigAction.CLUSTER_ENV_SECURITY_ENABLED, "false");
+
+    EasyMock.expect(m_clusterEnvConfig.getProperties()).andReturn(clusterEnvProperties).atLeastOnce();
+    EasyMock.expect(m_cluster.getDesiredConfigByType(HiveKerberosConfigAction.CLUSTER_ENV_CONFIG_TYPE)).andReturn(m_clusterEnvConfig).atLeastOnce();
+    EasyMock.replay(m_executionCommand, m_clusters, m_cluster, m_clusterEnvConfig, m_hiveSiteConfig);
+
+    m_action.execute(null);
+
+    EasyMock.verify(m_executionCommand, m_clusters, m_cluster, m_clusterEnvConfig, m_hiveSiteConfig);
+  }
+
+  /**
+   * Tests that nothing is set if the cluster-env configuration type is missing.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testKerberosNotEnabledMissingClusterEnv() throws Exception {
+    EasyMock.expect(m_cluster.getDesiredConfigByType(HiveKerberosConfigAction.CLUSTER_ENV_CONFIG_TYPE)).andReturn(null).atLeastOnce();
+    EasyMock.replay(m_executionCommand, m_clusters, m_cluster, m_clusterEnvConfig, m_hiveSiteConfig);
+
+    m_action.execute(null);
+
+    EasyMock.verify(m_executionCommand, m_clusters, m_cluster, m_clusterEnvConfig, m_hiveSiteConfig);
+  }
+  
+  /**
+   * Tests that nothing is set if the security_enabled property is missing from cluster-env.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testKerberosNotEnabledMissingSecurityInformation() throws Exception {
+    Map<String, String> clusterEnvProperties = new HashMap<>();
+
+    EasyMock.expect(m_clusterEnvConfig.getProperties()).andReturn(clusterEnvProperties).atLeastOnce();
+    EasyMock.expect(m_cluster.getDesiredConfigByType(HiveKerberosConfigAction.CLUSTER_ENV_CONFIG_TYPE)).andReturn(m_clusterEnvConfig).atLeastOnce();
+    EasyMock.replay(m_executionCommand, m_clusters, m_cluster, m_clusterEnvConfig, m_hiveSiteConfig);
+
+    m_action.execute(null);
+
+    EasyMock.verify(m_executionCommand, m_clusters, m_cluster, m_clusterEnvConfig, m_hiveSiteConfig);
+  }  
+
+  /**
+   * Tests that the correct properties are set when Kerberos is enabled.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testKerberosEnabled() throws Exception {
+    final String zookeeperQuorum = "c6401.ambari.apache.org:2181,c6402.ambari.apache.org:2181";
+
+    PowerMockito.mockStatic(ZooKeeperQuorumCalculator.class);
+    PowerMockito.when(ZooKeeperQuorumCalculator.getZooKeeperQuorumString(m_cluster)).thenReturn(
+        zookeeperQuorum);
+
+    Map<String, String> clusterEnvProperties = new HashMap<>();
+    Map<String, String> hiveSiteProperties = new HashMap<>();
+    clusterEnvProperties.put(HiveKerberosConfigAction.CLUSTER_ENV_SECURITY_ENABLED, "true");
+
+    EasyMock.expect(m_clusterEnvConfig.getProperties()).andReturn(clusterEnvProperties).atLeastOnce();
+    EasyMock.expect(m_hiveSiteConfig.getProperties()).andReturn(hiveSiteProperties).atLeastOnce();
+
+    m_hiveSiteConfig.setProperties(EasyMock.anyObject(Map.class));
+    EasyMock.expectLastCall().once();
+
+    m_hiveSiteConfig.persist(false);
+    EasyMock.expectLastCall().once();
+
+    EasyMock.expect(m_cluster.getDesiredConfigByType(HiveKerberosConfigAction.CLUSTER_ENV_CONFIG_TYPE)).andReturn(m_clusterEnvConfig).atLeastOnce();
+    EasyMock.expect(m_cluster.getDesiredConfigByType(HiveKerberosConfigAction.HIVE_SITE_CONFIG_TYPE)).andReturn(m_hiveSiteConfig).atLeastOnce();
+
+    EasyMock.replay(m_executionCommand, m_clusters, m_cluster, m_clusterEnvConfig, m_hiveSiteConfig);
+
+    m_action.execute(null);
+
+    EasyMock.verify(m_executionCommand, m_clusters, m_cluster, m_clusterEnvConfig, m_hiveSiteConfig);
+
+    Assert.assertEquals(zookeeperQuorum, hiveSiteProperties.get(HiveKerberosConfigAction.HIVE_SITE_ZK_QUORUM));
+    Assert.assertEquals(zookeeperQuorum, hiveSiteProperties.get(HiveKerberosConfigAction.HIVE_SITE_ZK_CONNECT_STRING));
+  }
+
+}