Posted to commits@ambari.apache.org by rn...@apache.org on 2015/04/23 19:38:13 UTC

ambari git commit: AMBARI-10489. Blueprints support for Hive Server HA clusters. (rnettleton)

Repository: ambari
Updated Branches:
  refs/heads/trunk e8e7c827d -> 727b76f0f


AMBARI-10489. Blueprints support for Hive Server HA clusters.  (rnettleton)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/727b76f0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/727b76f0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/727b76f0

Branch: refs/heads/trunk
Commit: 727b76f0f8e5b7bf8f965adb531b0c08950834b3
Parents: e8e7c82
Author: Bob Nettleton <rn...@hortonworks.com>
Authored: Thu Apr 23 13:36:10 2015 -0400
Committer: Bob Nettleton <rn...@hortonworks.com>
Committed: Thu Apr 23 13:37:56 2015 -0400

----------------------------------------------------------------------
 .../BlueprintConfigurationProcessor.java        | 171 ++++++++-
 .../stacks/HDP/2.2/services/HIVE/metainfo.xml   |   7 +
 .../BlueprintConfigurationProcessorTest.java    | 361 ++++++++++++++++++-
 3 files changed, 523 insertions(+), 16 deletions(-)
----------------------------------------------------------------------
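For context, the net effect of this change can be sketched as follows. This is a minimal, illustrative Java sketch, not part of the commit: the class name is invented, while the host names, host group names, and the %HOSTGROUP% token syntax mirror the test cases below. It shows the kind of token substitution the processor performs for hive.metastore.uris when two HIVE_METASTORE instances are mapped:

import java.util.LinkedHashMap;
import java.util.Map;

public class MetastoreUrisSketch {
  public static void main(String[] args) {
    // exported Blueprint value using the %HOSTGROUP% token syntax
    String exported = "thrift://%HOSTGROUP::host_group_1%:9083,"
                    + "thrift://%HOSTGROUP::host_group_2%:9083";

    // host-group-to-host mapping resolved at cluster creation time
    Map<String, String> hostsByGroup = new LinkedHashMap<String, String>();
    hostsByGroup.put("host_group_1", "c6401.ambari.apache.org");
    hostsByGroup.put("host_group_2", "c6402.ambari.apache.org");

    String resolved = exported;
    for (Map.Entry<String, String> entry : hostsByGroup.entrySet()) {
      resolved = resolved.replace("%HOSTGROUP::" + entry.getKey() + "%",
                                  entry.getValue());
    }

    // prints:
    // thrift://c6401.ambari.apache.org:9083,thrift://c6402.ambari.apache.org:9083
    System.out.println(resolved);
  }
}

The same expansion is exercised end-to-end by testHiveConfigClusterUpdateUsingExportedNamesHiveServer2HA in the test diff below.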


http://git-wip-us.apache.org/repos/asf/ambari/blob/727b76f0/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index cec93bf..9c3266a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -22,6 +22,7 @@ package org.apache.ambari.server.controller.internal;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.LinkedHashSet;
@@ -343,6 +344,16 @@ public class BlueprintConfigurationProcessor {
       && configProperties.get("oozie-site").get("oozie.services.ext").contains("org.apache.oozie.service.ZKLocksService");
   }
 
+  /**
+   * Static convenience function to determine if HiveServer HA is enabled
+   * @param configProperties configuration properties for this cluster
+   * @return true if HiveServer HA is enabled
+   *         false if HiveServer HA is not enabled
+   */
+  static boolean isHiveServerHAEnabled(Map<String, Map<String, String>> configProperties) {
+    return configProperties.containsKey("hive-site") && configProperties.get("hive-site").containsKey("hive.server2.support.dynamic.service.discovery")
+      && configProperties.get("hive-site").get("hive.server2.support.dynamic.service.discovery").equals("true");
+  }
 
   /**
    * Convenience method to examine the current configuration, to determine
@@ -730,11 +741,26 @@ public class BlueprintConfigurationProcessor {
 
             if ((isOozieServerHAEnabled(properties)) && isComponentOozieServer() && (matchingGroups.size() > 1))     {
               if (!origValue.contains("localhost")) {
-                // if this Oozie property is a FQDN, then simply return i
+                // if this Oozie property is a FQDN, then simply return it
+                return origValue;
+              }
+            }
+
+            if ((isHiveServerHAEnabled(properties)) && isComponentHiveServer() && (matchingGroups.size() > 1)) {
+              if (!origValue.contains("localhost")) {
+                // if this Hive property is a FQDN, then simply return it
+                return origValue;
+              }
+            }
+
+            if ((isComponentHiveMetaStoreServer()) && matchingGroups.size() > 1) {
+              if (!origValue.contains("localhost")) {
+                // if this Hive MetaStore property is a FQDN, then simply return it
                 return origValue;
               }
             }
 
             throw new IllegalArgumentException("Unable to update configuration property " + "'" + propertyName + "'"+ " with topology information. " +
               "Component '" + component + "' is not mapped to any host group or is mapped to multiple groups.");
           }
@@ -787,6 +813,28 @@ public class BlueprintConfigurationProcessor {
     }
 
     /**
+     * Utility method to determine if the component associated with this updater
+     * instance is a Hive Server
+     *
+     * @return true if the associated component is a Hive Server
+     *         false if the component is not a Hive Server
+     */
+    private boolean isComponentHiveServer() {
+      return component.equals("HIVE_SERVER");
+    }
+
+    /**
+     * Utility method to determine if the component associated with this updater
+     * instance is a Hive MetaStore Server
+     *
+     * @return true if the associated component is a Hive MetaStore Server
+     *         false if the component is not a Hive MetaStore Server
+     */
+    private boolean isComponentHiveMetaStoreServer() {
+      return component.equals("HIVE_METASTORE");
+    }
+
+    /**
      * Provides access to the name of the component associated
      *   with this updater instance.
      *
@@ -914,12 +962,21 @@ public class BlueprintConfigurationProcessor {
     private final Character separator;
 
     /**
+     * Flag indicating whether a URL scheme detected as
+     * a prefix in the property value should be repeated
+     * for each host listed in the updated property
+     */
+    private final boolean usePrefixForEachHost;
+
+    private final Set<String> setOfKnownURLSchemes = Collections.singleton("thrift://");
+
+    /**
      * Constructor.
      *
      * @param component  component name associated with the property
      */
     public MultipleHostTopologyUpdater(String component) {
-      this(component, DEFAULT_SEPARATOR);
+      this(component, DEFAULT_SEPARATOR, false);
     }
 
     /**
@@ -929,9 +986,10 @@ public class BlueprintConfigurationProcessor {
      * @param separator the separator character to use when multiple hosts
      *                  are specified in a property or URL
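+     * @param usePrefixForEachHost flag indicating whether a URL scheme
+     *                             detected as a prefix in the property value
+     *                             should be repeated for each host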
      */
-    public MultipleHostTopologyUpdater(String component, Character separator) {
+    public MultipleHostTopologyUpdater(String component, Character separator, boolean usePrefixForEachHost) {
       this.component = component;
       this.separator = separator;
+      this.usePrefixForEachHost = usePrefixForEachHost;
     }
 
     /**
@@ -953,6 +1011,8 @@ public class BlueprintConfigurationProcessor {
                                          String origValue,
                                          Map<String, Map<String, String>> properties,
                                          Stack stackDefinition) {
+      StringBuilder sb = new StringBuilder();
+
       if (!origValue.contains("%HOSTGROUP") &&
         (!origValue.contains("localhost"))) {
         // this property must contain FQDNs specified directly by the user
@@ -960,14 +1020,26 @@ public class BlueprintConfigurationProcessor {
         return origValue;
       }
 
+      String prefix = null;
       Collection<String> hostStrings = getHostStrings(hostGroups, origValue);
       if (hostStrings.isEmpty()) {
         //default non-exported original value
         String port = null;
-        if (origValue.contains(":")) {
-          //todo: currently assuming all hosts are using same port
-          port = origValue.substring(origValue.indexOf(":") + 1);
+        for (String urlScheme : setOfKnownURLSchemes) {
+          if (origValue.startsWith(urlScheme)) {
+            prefix = urlScheme;
+          }
+        }
+
+        if (prefix != null) {
+          String valueWithoutPrefix = origValue.substring(prefix.length());
+          port = calculatePort(valueWithoutPrefix);
+          sb.append(prefix);
+        } else {
+          port = calculatePort(origValue);
         }
+
         Collection<HostGroup> matchingGroups = getHostGroupsForComponent(component, hostGroups.values());
         for (HostGroup group : matchingGroups) {
           for (String host : group.getHostInfo()) {
@@ -979,8 +1051,10 @@ public class BlueprintConfigurationProcessor {
         }
       }
 
-      StringBuilder sb = new StringBuilder();
       String suffix = null;
+
       // parse out prefix if one exists
       Matcher matcher = HOSTGROUP_PORT_REGEX.matcher(origValue);
       if (matcher.find()) {
@@ -988,7 +1062,8 @@ public class BlueprintConfigurationProcessor {
         // handle the case of a YAML config property
         if ((indexOfStart > 0) && (!origValue.substring(0, indexOfStart).equals("['"))) {
           // append prefix before adding host names
-          sb.append(origValue.substring(0, indexOfStart));
+          prefix = origValue.substring(0, indexOfStart);
+          sb.append(prefix);
         }
 
         // parse out suffix if one exists
@@ -1008,9 +1083,16 @@ public class BlueprintConfigurationProcessor {
       for (String host : hostStrings) {
         if (!firstHost) {
           sb.append(separator);
+          // support config properties that use a list of full URIs
+          if (usePrefixForEachHost && (prefix != null)) {
+            sb.append(prefix);
+          }
         } else {
           firstHost = false;
         }
         sb.append(host);
       }
 
@@ -1021,6 +1103,15 @@ public class BlueprintConfigurationProcessor {
 
       return sb.toString();
     }
+
+    private static String calculatePort(String origValue) {
+      if (origValue.contains(":")) {
+        //todo: currently assuming all hosts are using same port
+        return origValue.substring(origValue.indexOf(":") + 1);
+      }
+
+      return null;
+    }
   }
 
   /**
@@ -1151,6 +1242,64 @@ public class BlueprintConfigurationProcessor {
     }
   }
 
+  /**
+   * Custom PropertyUpdater that handles the parsing and updating of the
+   * "templeton.hive.properties" configuration property for WebHCat.
+   * This particular configuration property uses a format of
+   * comma-separated key/value pairs.  The values, in the case of the
+   * hive.metastore.uris property, can also contain commas, and so character
+   * escaping with a backslash (\) must take place during substitution.
+   *
+   */
+  private static class TempletonHivePropertyUpdater implements PropertyUpdater {
+
+    private Map<String, PropertyUpdater> mapOfKeysToUpdaters =
+      new HashMap<String, PropertyUpdater>();
+
+    TempletonHivePropertyUpdater() {
+      // the only known property that requires hostname substitution is hive.metastore.uris,
+      // but this updater should be flexible enough for other properties in the future.
+      mapOfKeysToUpdaters.put("hive.metastore.uris", new MultipleHostTopologyUpdater("HIVE_METASTORE", ',', true));
+    }
+
+    @Override
+    public String updateForClusterCreate(Map<String, ? extends HostGroup> hostGroups,
+                                         String propertyName,
+                                         String origValue,
+                                         Map<String, Map<String, String>> properties,
+                                         Stack stackDefinition) {
+      // short-circuit out any custom property values defined by the deployer
+      if (!origValue.contains("%HOSTGROUP") &&
+        (!origValue.contains("localhost"))) {
+        // this property must contain FQDNs specified directly by the user
+        // of the Blueprint, so the processor should not attempt to update them
+        return origValue;
+      }
+
+      StringBuilder updatedResult = new StringBuilder();
+
+      // split out the key/value pairs
+      String[] keyValuePairs = origValue.split(",");
+      boolean firstValue = true;
+      for (String keyValuePair : keyValuePairs) {
+        if (!firstValue) {
+          updatedResult.append(",");
+        } else {
+          firstValue = false;
+        }
+
+        String key = keyValuePair.split("=")[0];
+        if (mapOfKeysToUpdaters.containsKey(key)) {
+          String result = mapOfKeysToUpdaters.get(key).updateForClusterCreate(hostGroups, key, keyValuePair.split("=")[1], properties, stackDefinition);
+          // append the internal property result, escape out any commas in the internal property,
+          // this is required due to the specific syntax of templeton.hive.properties
+          updatedResult.append(key + "=" + result.replaceAll(",", Matcher.quoteReplacement("\\,")));
+        } else {
+          updatedResult.append(keyValuePair);
+        }
+      }
+
+      return updatedResult.toString();
+    }
+  }
+
   /**
    * Register updaters for configuration properties.
    */
@@ -1239,7 +1388,7 @@ public class BlueprintConfigurationProcessor {
     hbaseSiteMap.put("hbase.rootdir", new SingleHostTopologyUpdater("NAMENODE"));
     accumuloSiteMap.put("instance.volumes", new SingleHostTopologyUpdater("NAMENODE"));
     // HDFS shared.edits JournalNode Quorum URL uses semi-colons as separators
-    multiHdfsSiteMap.put("dfs.namenode.shared.edits.dir", new MultipleHostTopologyUpdater("JOURNALNODE", ';'));
+    multiHdfsSiteMap.put("dfs.namenode.shared.edits.dir", new MultipleHostTopologyUpdater("JOURNALNODE", ';', false));
 
     // SECONDARY_NAMENODE
     hdfsSiteMap.put("dfs.secondary.http.address", new SingleHostTopologyUpdater("SECONDARY_NAMENODE"));
@@ -1271,13 +1420,13 @@ public class BlueprintConfigurationProcessor {
 
 
     // HIVE_SERVER
-    hiveSiteMap.put("hive.metastore.uris", new SingleHostTopologyUpdater("HIVE_SERVER"));
+    multiHiveSiteMap.put("hive.metastore.uris", new MultipleHostTopologyUpdater("HIVE_METASTORE", ',', true));
     dbHiveSiteMap.put("javax.jdo.option.ConnectionURL",
         new DBTopologyUpdater("MYSQL_SERVER", "hive-env", "hive_database"));
     multiCoreSiteMap.put("hadoop.proxyuser.hive.hosts", new MultipleHostTopologyUpdater("HIVE_SERVER"));
     multiCoreSiteMap.put("hadoop.proxyuser.HTTP.hosts", new MultipleHostTopologyUpdater("WEBHCAT_SERVER"));
     multiCoreSiteMap.put("hadoop.proxyuser.hcat.hosts", new MultipleHostTopologyUpdater("WEBHCAT_SERVER"));
-    multiWebhcatSiteMap.put("templeton.hive.properties", new SingleHostTopologyUpdater("HIVE_METASTORE"));
+    multiWebhcatSiteMap.put("templeton.hive.properties", new TempletonHivePropertyUpdater());
     multiWebhcatSiteMap.put("templeton.kerberos.principal", new MultipleHostTopologyUpdater("WEBHCAT_SERVER"));
     hiveEnvMap.put("hive_hostname", new SingleHostTopologyUpdater("HIVE_SERVER"));
     multiHiveSiteMap.put("hive.zookeeper.quorum", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"));

http://git-wip-us.apache.org/repos/asf/ambari/blob/727b76f0/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/metainfo.xml
index fb9a596..8eb5f28 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/metainfo.xml
@@ -48,7 +48,14 @@
               <config-type>hiveserver2-site</config-type>
           </configuration-dependencies>
         </component>
+        <component>
+          <name>WEBHCAT_SERVER</name>
+          <cardinality>1+</cardinality>
+        </component>
       </components>
       
       <osSpecifics>
         <osSpecific>
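
Tying back to the detection logic added in BlueprintConfigurationProcessor above, here is a small usage sketch (hypothetical config values; the class name is invented, while the predicate body mirrors isHiveServerHAEnabled from this commit). HiveServer2 HA is signaled by hive.server2.support.dynamic.service.discovery=true in hive-site:

import java.util.HashMap;
import java.util.Map;

public class HiveServerHACheckSketch {
  // mirrors the isHiveServerHAEnabled predicate added in this commit
  static boolean isHiveServerHAEnabled(Map<String, Map<String, String>> configProperties) {
    return configProperties.containsKey("hive-site")
        && configProperties.get("hive-site").containsKey("hive.server2.support.dynamic.service.discovery")
        && configProperties.get("hive-site").get("hive.server2.support.dynamic.service.discovery").equals("true");
  }

  public static void main(String[] args) {
    Map<String, Map<String, String>> config = new HashMap<String, Map<String, String>>();
    Map<String, String> hiveSite = new HashMap<String, String>();
    hiveSite.put("hive.server2.support.dynamic.service.discovery", "true");
    config.put("hive-site", hiveSite);

    System.out.println(isHiveServerHAEnabled(config)); // prints: true
  }
}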

http://git-wip-us.apache.org/repos/asf/ambari/blob/727b76f0/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
index aa9fa00..6c26b06 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@ -1368,9 +1368,6 @@ public class BlueprintConfigurationProcessorTest {
 
     HostGroup mockHostGroupOne = mockSupport.createMock(HostGroup.class);
 
-    expect(mockHostGroupOne.getComponents()).andReturn(Collections.singleton("HIVE_METASTORE")).atLeastOnce();
-    expect(mockHostGroupOne.getHostInfo()).andReturn(Collections.singleton("test-host-one")).atLeastOnce();
-
     mockSupport.replayAll();
 
     Map<String, Map<String, String>> configProperties =
@@ -1405,6 +1402,208 @@ public class BlueprintConfigurationProcessorTest {
   }
 
   @Test
+  public void testHiveConfigClusterUpdateCustomValueSpecifyingHostNamesMetaStoreHA() throws Exception {
+    final String expectedHostGroupName = "host_group_1";
+
+    final String expectedPropertyValue =
+      "hive.metastore.local=false,hive.metastore.uris=thrift://headnode0.ivantestcluster2-ssh.d1.internal.cloudapp.net:9083,hive.user.install.directory=/user";
+
+    EasyMockSupport mockSupport = new EasyMockSupport();
+
+    HostGroup mockHostGroupOne = mockSupport.createMock(HostGroup.class);
+    HostGroup mockHostGroupTwo = mockSupport.createMock(HostGroup.class);
+
+    Stack mockStack = mockSupport.createMock(Stack.class);
+
+    mockSupport.replayAll();
+
+    Map<String, Map<String, String>> configProperties =
+      new HashMap<String, Map<String, String>>();
+
+    Map<String, String> webHCatSiteProperties =
+      new HashMap<String, String>();
+
+    configProperties.put("webhcat-site", webHCatSiteProperties);
+
+    // setup properties that include host information
+    webHCatSiteProperties.put("templeton.hive.properties",
+      expectedPropertyValue);
+
+    BlueprintConfigurationProcessor configProcessor =
+      new BlueprintConfigurationProcessor(configProperties);
+
+    Map<String, HostGroup> mapOfHostGroups =
+      new HashMap<String, HostGroup>();
+    mapOfHostGroups.put(expectedHostGroupName, mockHostGroupOne);
+    mapOfHostGroups.put("host_group_2", mockHostGroupTwo);
+
+    // call top-level cluster config update method
+    configProcessor.doUpdateForClusterCreate(mapOfHostGroups, mockStack);
+
+    assertEquals("Unexpected config update for templeton.hive.properties",
+      expectedPropertyValue,
+      webHCatSiteProperties.get("templeton.hive.properties"));
+
+    mockSupport.verifyAll();
+
+  }
+
+  @Test
+  public void testHiveConfigClusterUpdateSpecifyingHostNamesHiveServer2HA() throws Exception {
+    final String expectedHostGroupName = "host_group_1";
+
+    final String expectedPropertyValue =
+      "c6401.ambari.apache.org";
+
+    final String expectedMetaStoreURIs = "thrift://c6401.ambari.apache.org:9083,thrift://c6402.ambari.apache.org:9083";
+
+    EasyMockSupport mockSupport = new EasyMockSupport();
+
+    Stack mockStack = mockSupport.createMock(Stack.class);
+
+    HostGroup mockHostGroupOne = mockSupport.createMock(HostGroup.class);
+    HostGroup mockHostGroupTwo = mockSupport.createMock(HostGroup.class);
+
+    expect(mockHostGroupOne.getComponents()).andReturn(Collections.singleton("HIVE_SERVER")).atLeastOnce();
+    expect(mockHostGroupTwo.getComponents()).andReturn(Collections.singleton("HIVE_SERVER")).atLeastOnce();
+
+    // simulate stack definition for HIVE_SERVER
+    expect(mockStack.getCardinality("HIVE_SERVER")).andReturn(new Cardinality("1+")).atLeastOnce();
+
+    mockSupport.replayAll();
+
+    Map<String, Map<String, String>> configProperties =
+      new HashMap<String, Map<String, String>>();
+
+    Map<String, String> hiveEnvProperties =
+      new HashMap<String, String>();
+    Map<String, String> hiveSiteProperties =
+      new HashMap<String, String>();
+
+    configProperties.put("hive-env", hiveEnvProperties);
+    configProperties.put("hive-site", hiveSiteProperties);
+
+    // setup properties that include host information
+    hiveEnvProperties.put("hive_hostname",
+      expectedPropertyValue);
+
+    // simulate HA mode, since this property must be present in HiveServer2 HA
+    hiveSiteProperties.put("hive.server2.support.dynamic.service.discovery", "true");
+
+    // set MetaStore URIs property to reflect an HA environment for HIVE_METASTORE
+    hiveSiteProperties.put("hive.metastore.uris", expectedMetaStoreURIs);
+
+    BlueprintConfigurationProcessor configProcessor =
+      new BlueprintConfigurationProcessor(configProperties);
+
+    Map<String, HostGroup> mapOfHostGroups =
+      new HashMap<String, HostGroup>();
+    mapOfHostGroups.put(expectedHostGroupName, mockHostGroupOne);
+    mapOfHostGroups.put("host_group_2", mockHostGroupTwo);
+
+    // call top-level cluster config update method
+    configProcessor.doUpdateForClusterCreate(mapOfHostGroups, mockStack);
+
+    assertEquals("Unexpected config update for hive_hostname",
+      expectedPropertyValue,
+      hiveEnvProperties.get("hive_hostname"));
+
+    assertEquals("Unexpected config update for hive.metastore.uris",
+      expectedMetaStoreURIs,
+      hiveSiteProperties.get("hive.metastore.uris"));
+
+    mockSupport.verifyAll();
+
+  }
+
+  @Test
+  public void testHiveConfigClusterUpdateUsingExportedNamesHiveServer2HA() throws Exception {
+    final String expectedHostGroupNameOne = "host_group_1";
+    final String expectedHostGroupNameTwo = "host_group_2";
+
+    final String expectedHostNameOne =
+      "c6401.ambari.apache.org";
+
+    final String expectedHostNameTwo =
+      "c6402.ambari.apache.org";
+
+    // use exported HOSTGROUP syntax for this property, to make sure the
+    // config processor updates this as expected
+    final String inputMetaStoreURIs = "thrift://" + createExportedAddress("9083", expectedHostGroupNameOne) + "," + "thrift://" + createExportedAddress("9083", expectedHostGroupNameTwo);
+
+    final String expectedMetaStoreURIs = "thrift://c6401.ambari.apache.org:9083,thrift://c6402.ambari.apache.org:9083";
+
+    EasyMockSupport mockSupport = new EasyMockSupport();
+
+    Stack mockStack = mockSupport.createMock(Stack.class);
+
+    HostGroup mockHostGroupOne = mockSupport.createMock(HostGroup.class);
+    HostGroup mockHostGroupTwo = mockSupport.createMock(HostGroup.class);
+
+    expect(mockHostGroupOne.getHostInfo()).andReturn(Collections.singleton(expectedHostNameOne)).atLeastOnce();
+    expect(mockHostGroupTwo.getHostInfo()).andReturn(Collections.singleton(expectedHostNameTwo)).atLeastOnce();
+
+    Set<String> setOfComponents = new HashSet<String>();
+    setOfComponents.add("HIVE_SERVER");
+    setOfComponents.add("HIVE_METASTORE");
+
+    expect(mockHostGroupOne.getComponents()).andReturn(setOfComponents).atLeastOnce();
+    expect(mockHostGroupTwo.getComponents()).andReturn(setOfComponents).atLeastOnce();
+
+    // simulate stack definition for HIVE_SERVER
+    expect(mockStack.getCardinality("HIVE_SERVER")).andReturn(new Cardinality("1+")).atLeastOnce();
+
+    mockSupport.replayAll();
+
+    Map<String, Map<String, String>> configProperties =
+      new HashMap<String, Map<String, String>>();
+
+    Map<String, String> hiveEnvProperties =
+      new HashMap<String, String>();
+    Map<String, String> hiveSiteProperties =
+      new HashMap<String, String>();
+
+    configProperties.put("hive-env", hiveEnvProperties);
+    configProperties.put("hive-site", hiveSiteProperties);
+
+    // setup properties that include host information
+    hiveEnvProperties.put("hive_hostname",
+      expectedHostNameOne);
+
+    // simulate HA mode, since this property must be present in HiveServer2 HA
+    hiveSiteProperties.put("hive.server2.support.dynamic.service.discovery", "true");
+
+    // set MetaStore URIs property to reflect an HA environment for HIVE_METASTORE
+    hiveSiteProperties.put("hive.metastore.uris", inputMetaStoreURIs);
+
+    BlueprintConfigurationProcessor configProcessor =
+      new BlueprintConfigurationProcessor(configProperties);
+
+    Map<String, HostGroup> mapOfHostGroups =
+      new HashMap<String, HostGroup>();
+    mapOfHostGroups.put(expectedHostGroupNameOne, mockHostGroupOne);
+    mapOfHostGroups.put(expectedHostGroupNameTwo, mockHostGroupTwo);
+
+    // call top-level cluster config update method
+    configProcessor.doUpdateForClusterCreate(mapOfHostGroups, mockStack);
+
+    assertEquals("Unexpected config update for hive_hostname",
+      expectedHostNameOne,
+      hiveEnvProperties.get("hive_hostname"));
+
+    assertEquals("Unexpected config update for hive.metastore.uris",
+      expectedMetaStoreURIs,
+      hiveSiteProperties.get("hive.metastore.uris"));
+
+    mockSupport.verifyAll();
+
+  }
+
+  @Test
   public void testHiveConfigClusterUpdateDefaultValue() throws Exception {
     final String expectedHostGroupName = "host_group_1";
     final String expectedHostName = "c6401.ambari.apache.org";
@@ -1452,6 +1651,63 @@ public class BlueprintConfigurationProcessorTest {
   }
 
   @Test
+  public void testHiveConfigClusterUpdateDefaultValueWithMetaStoreHA() throws Exception {
+    final String expectedHostGroupName = "host_group_1";
+    final String expectedHostNameOne = "c6401.ambari.apache.org";
+    final String expectedHostNameTwo = "c6402.ambari.apache.org";
+
+    final String expectedPropertyValue =
+      "hive.metastore.local=false,hive.metastore.uris=thrift://localhost:9933,hive.metastore.sasl.enabled=false";
+
+    EasyMockSupport mockSupport = new EasyMockSupport();
+
+    Stack mockStack = mockSupport.createMock(Stack.class);
+
+    HostGroup mockHostGroupOne = mockSupport.createMock(HostGroup.class);
+    HostGroup mockHostGroupTwo = mockSupport.createMock(HostGroup.class);
+
+    expect(mockHostGroupOne.getComponents()).andReturn(Collections.singleton("HIVE_METASTORE")).atLeastOnce();
+    expect(mockHostGroupOne.getHostInfo()).andReturn(Collections.singleton(expectedHostNameOne)).atLeastOnce();
+
+    expect(mockHostGroupTwo.getComponents()).andReturn(Collections.singleton("HIVE_METASTORE")).atLeastOnce();
+    expect(mockHostGroupTwo.getHostInfo()).andReturn(Collections.singleton(expectedHostNameTwo)).atLeastOnce();
+
+    mockSupport.replayAll();
+
+    Map<String, Map<String, String>> configProperties =
+      new HashMap<String, Map<String, String>>();
+
+    Map<String, String> webHCatSiteProperties =
+      new HashMap<String, String>();
+
+    configProperties.put("webhcat-site", webHCatSiteProperties);
+
+    // setup properties that include host information
+    webHCatSiteProperties.put("templeton.hive.properties",
+      expectedPropertyValue);
+
+    BlueprintConfigurationProcessor configProcessor =
+      new BlueprintConfigurationProcessor(configProperties);
+
+    Map<String, HostGroup> mapOfHostGroups =
+      new HashMap<String, HostGroup>();
+    mapOfHostGroups.put(expectedHostGroupName, mockHostGroupOne);
+    mapOfHostGroups.put("host_group_2", mockHostGroupTwo);
+
+    // call top-level cluster config update method
+    configProcessor.doUpdateForClusterCreate(mapOfHostGroups, mockStack);
+
+    // verify that the host names in the metastore.uris property have been updated,
+    // and that both MetaStore Server URIs are included, using the required Hive syntax
+    assertEquals("Unexpected config update for templeton.hive.properties",
+      "hive.metastore.local=false,hive.metastore.uris=thrift://" + expectedHostNameOne + ":9933\\," + "thrift://" + expectedHostNameTwo + ":9933" + "," + "hive.metastore.sasl.enabled=false",
+      webHCatSiteProperties.get("templeton.hive.properties"));
+
+    mockSupport.verifyAll();
+
+  }
+
+  @Test
   public void testHiveConfigClusterUpdateExportedHostGroupValue() throws Exception {
     final String expectedHostGroupName = "host_group_1";
     final String expectedHostName = "c6401.ambari.apache.org";
@@ -2784,7 +3040,99 @@ public class BlueprintConfigurationProcessorTest {
 
 
     // setup properties that include host information
-    hiveSiteProperties.put("hive.metastore.uris", expectedHostName + ":" + expectedPortNum);
+    hiveSiteProperties.put("hive.metastore.uris", "thrift://" + expectedHostName + ":" + expectedPortNum);
+    hiveSiteProperties.put("javax.jdo.option.ConnectionURL", expectedHostName + ":" + expectedPortNum);
+    hiveSiteProperties.put("hive.zookeeper.quorum", expectedHostName + ":" + expectedPortNum + "," + expectedHostNameTwo + ":" + expectedPortNum);
+    hiveSiteProperties.put("hive.cluster.delegation.token.store.zookeeper.connectString", expectedHostName + ":" + expectedPortNum + "," + expectedHostNameTwo + ":" + expectedPortNum);
+    hiveEnvProperties.put("hive_hostname", expectedHostName);
+
+    webHCatSiteProperties.put("templeton.hive.properties", expectedHostName + "," + expectedHostNameTwo);
+    webHCatSiteProperties.put("templeton.kerberos.principal", expectedHostName);
+
+    coreSiteProperties.put("hadoop.proxyuser.hive.hosts", expectedHostName + "," + expectedHostNameTwo);
+    coreSiteProperties.put("hadoop.proxyuser.HTTP.hosts", expectedHostName + "," + expectedHostNameTwo);
+    coreSiteProperties.put("hadoop.proxyuser.hcat.hosts", expectedHostName + "," + expectedHostNameTwo);
+
+    BlueprintConfigurationProcessor configProcessor =
+      new BlueprintConfigurationProcessor(configProperties);
+
+    // call top-level export method
+    configProcessor.doUpdateForBlueprintExport(Arrays.asList(mockHostGroupOne, mockHostGroupTwo));
+
+    assertEquals("hive property not properly exported",
+      "thrift://" + createExportedAddress(expectedPortNum, expectedHostGroupName), hiveSiteProperties.get("hive.metastore.uris"));
+    assertEquals("hive property not properly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName), hiveSiteProperties.get("javax.jdo.option.ConnectionURL"));
+    assertEquals("hive property not properly exported",
+      createExportedHostName(expectedHostGroupName), hiveEnvProperties.get("hive_hostname"));
+
+    assertEquals("hive property not properly exported",
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo),
+      webHCatSiteProperties.get("templeton.hive.properties"));
+    assertEquals("hive property not properly exported",
+      createExportedHostName(expectedHostGroupName), webHCatSiteProperties.get("templeton.kerberos.principal"));
+
+    assertEquals("hive property not properly exported",
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.hive.hosts"));
+
+    assertEquals("hive property not properly exported",
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.HTTP.hosts"));
+
+    assertEquals("hive property not properly exported",
+      createExportedHostName(expectedHostGroupName) + "," + createExportedHostName(expectedHostGroupNameTwo), coreSiteProperties.get("hadoop.proxyuser.hcat.hosts"));
+
+    assertEquals("hive zookeeper quorum property not properly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName) + "," + createExportedAddress(expectedPortNum, expectedHostGroupNameTwo),
+      hiveSiteProperties.get("hive.zookeeper.quorum"));
+
+    assertEquals("hive zookeeper connectString property not properly exported",
+      createExportedAddress(expectedPortNum, expectedHostGroupName) + "," + createExportedAddress(expectedPortNum, expectedHostGroupNameTwo),
+      hiveSiteProperties.get("hive.cluster.delegation.token.store.zookeeper.connectString"));
+
+    mockSupport.verifyAll();
+  }
+
+  @Test
+  public void testHiveConfigExportedMultipleHiveMetaStoreServers() throws Exception {
+    final String expectedHostName = "c6401.ambari.apache.org";
+    final String expectedHostNameTwo = "c6402.ambari.apache.org";
+    final String expectedPortNum = "808080";
+    final String expectedHostGroupName = "host_group_1";
+    final String expectedHostGroupNameTwo = "host_group_2";
+
+    EasyMockSupport mockSupport = new EasyMockSupport();
+
+    HostGroup mockHostGroupOne = mockSupport.createMock(HostGroup.class);
+    HostGroup mockHostGroupTwo = mockSupport.createMock(HostGroup.class);
+
+    expect(mockHostGroupOne.getHostInfo()).andReturn(Arrays.asList(expectedHostName, "serverTwo")).atLeastOnce();
+    expect(mockHostGroupTwo.getHostInfo()).andReturn(Arrays.asList(expectedHostNameTwo, "serverTwo")).atLeastOnce();
+    expect(mockHostGroupOne.getName()).andReturn(expectedHostGroupName).atLeastOnce();
+    expect(mockHostGroupTwo.getName()).andReturn(expectedHostGroupNameTwo).atLeastOnce();
+
+    mockSupport.replayAll();
+
+    Map<String, Map<String, String>> configProperties =
+      new HashMap<String, Map<String, String>>();
+
+    Map<String, String> hiveSiteProperties =
+      new HashMap<String, String>();
+    Map<String, String> hiveEnvProperties =
+      new HashMap<String, String>();
+    Map<String, String> webHCatSiteProperties =
+      new HashMap<String, String>();
+    Map<String, String> coreSiteProperties =
+      new HashMap<String, String>();
+
+    configProperties.put("hive-site", hiveSiteProperties);
+    configProperties.put("hive-env", hiveEnvProperties);
+    configProperties.put("webhcat-site", webHCatSiteProperties);
+    configProperties.put("core-site", coreSiteProperties);
+
+    // setup properties that include host information
+    hiveSiteProperties.put("hive.metastore.uris", "thrift://" + expectedHostName + ":" + expectedPortNum + "," + "thrift://" + expectedHostNameTwo + ":" + expectedPortNum);
     hiveSiteProperties.put("javax.jdo.option.ConnectionURL", expectedHostName + ":" + expectedPortNum);
     hiveSiteProperties.put("hive.zookeeper.quorum", expectedHostName + ":" + expectedPortNum + "," + expectedHostNameTwo + ":" + expectedPortNum);
     hiveSiteProperties.put("hive.cluster.delegation.token.store.zookeeper.connectString", expectedHostName + ":" + expectedPortNum + "," + expectedHostNameTwo + ":" + expectedPortNum);
@@ -2804,8 +3152,11 @@ public class BlueprintConfigurationProcessorTest {
     // call top-level export method
     configProcessor.doUpdateForBlueprintExport(Arrays.asList(mockHostGroupOne, mockHostGroupTwo));
 
     assertEquals("hive property not properly exported",
-      createExportedAddress(expectedPortNum, expectedHostGroupName), hiveSiteProperties.get("hive.metastore.uris"));
+      "thrift://" + createExportedAddress(expectedPortNum, expectedHostGroupName) + "," + "thrift://" + createExportedAddress(expectedPortNum, expectedHostGroupNameTwo), hiveSiteProperties.get("hive.metastore.uris"));
     assertEquals("hive property not properly exported",
       createExportedAddress(expectedPortNum, expectedHostGroupName), hiveSiteProperties.get("javax.jdo.option.ConnectionURL"));
     assertEquals("hive property not properly exported",