You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by rl...@apache.org on 2017/06/30 09:46:17 UTC

[01/33] ambari git commit: AMBARI-21206 - Remove Zookeeper as a required service from YARN

Repository: ambari
Updated Branches:
  refs/heads/branch-feature-AMBARI-20859 12ae25915 -> 0945f28e6


AMBARI-21206 - Remove Zookeeper as a required service from YARN


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a2464b90
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a2464b90
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a2464b90

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: a2464b9045637c1d5014db4aff7d83a0bc573fc0
Parents: 23cc628
Author: Tim Thorpe <tt...@apache.org>
Authored: Mon Jun 26 07:58:15 2017 -0700
Committer: Tim Thorpe <tt...@apache.org>
Committed: Mon Jun 26 07:58:15 2017 -0700

----------------------------------------------------------------------
 .../YARN/3.0.0.3.0/configuration/yarn-site.xml  |  10 +-
 .../common-services/YARN/3.0.0.3.0/metainfo.xml |  46 ++++++-
 .../YARN/3.0.0.3.0/service_advisor.py           |  53 +++++++-
 .../stacks/HDP/2.2/services/stack_advisor.py    |  53 +++++++-
 .../stacks/2.2/common/test_stack_advisor.py     | 132 ++++++++++++++++++-
 .../stacks/2.6/common/test_stack_advisor.py     |   9 ++
 6 files changed, 289 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a2464b90/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
index 64e0bcb..c77aa2a 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
@@ -486,7 +486,10 @@
   </property>
   <property>
     <name>hadoop.registry.zk.quorum</name>
-    <value>localhost:2181</value>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
     <description>
       List of hostname:port pairs defining the zookeeper quorum binding for the registry
     </description>
@@ -553,7 +556,10 @@
   </property>
   <property>
     <name>yarn.resourcemanager.zk-address</name>
-    <value>localhost:2181</value>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
     <description>
       List Host:Port of the ZooKeeper servers to be used by the RM. comma separated host:port pairs, each corresponding to a zk server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" If the optional chroot suffix is used the example would look like: "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002/app/a" where the client would be rooted at "/app/a" and all paths would be relative to this root - ie getting/setting/etc...  "/foo/bar" would result in operations being run on "/app/a/foo/bar" (from the server perspective).
     </description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/a2464b90/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml
index 061587d..90f4a92 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml
@@ -73,17 +73,41 @@
             <timeout>1200</timeout>
           </commandScript>
 
-          <!-- TODO HDP 3.0, add later after UI is fixed,
           <dependencies>
             <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>false</enabled>
+              </auto-deploy>
+              <conditions>
+                <condition xsi:type="propertyExists">
+                  <configType>yarn-site</configType>
+                  <property>yarn.resourcemanager.recovery.enabled</property>
+                  <value>true</value>
+                </condition>
+                <condition xsi:type="propertyExists">
+                  <configType>yarn-site</configType>
+                  <property>yarn.resourcemanager.ha.enabled</property>
+                  <value>true</value>
+                </condition>
+                <condition xsi:type="propertyExists">
+                  <configType>yarn-site</configType>
+                  <property>hadoop.registry.rm.enabled</property>
+                  <value>true</value>
+                </condition>
+              </conditions>
+            </dependency>
+            <!-- TODO HDP 3.0, add later after UI is fixed,
+            <dependency>
               <name>TEZ/TEZ_CLIENT</name>
               <scope>host</scope>
               <auto-deploy>
                 <enabled>true</enabled>
               </auto-deploy>
             </dependency>
+            -->
           </dependencies>
-          -->
 
           <logs>
             <log>
@@ -145,6 +169,23 @@
               <logId>yarn_nodemanager</logId>
             </log>
           </logs>
+
+          <dependencies>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>false</enabled>
+              </auto-deploy>
+              <conditions>
+                <condition xsi:type="propertyExists">
+                  <configType>yarn-site</configType>
+                  <property>yarn.nodemanager.recovery.enabled</property>
+                  <value>true</value>
+                </condition>
+              </conditions>
+            </dependency>
+          </dependencies>
         </component>
 
         <component>
@@ -214,7 +255,6 @@
       <requiredServices>
         <service>HDFS</service>
         <service>MAPREDUCE2</service>
-        <service>ZOOKEEPER</service>
       </requiredServices>
 
       <themes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/a2464b90/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
index 0fb538d..1af9821 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
@@ -351,12 +351,21 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
           putYarnPropertyAttribute('yarn.nodemanager.linux-container-executor.cgroups.hierarchy', 'delete', 'true')
           putYarnPropertyAttribute('yarn.nodemanager.linux-container-executor.cgroups.mount', 'delete', 'true')
           putYarnPropertyAttribute('yarn.nodemanager.linux-container-executor.cgroups.mount-path', 'delete', 'true')
-    # recommend hadoop.registry.rm.enabled based on SLIDER in services
+    # recommend hadoop.registry.rm.enabled based on SLIDER and ZOOKEEPER in services
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
-    if "SLIDER" in servicesList:
+    if "SLIDER" in servicesList and "ZOOKEEPER" in servicesList:
       putYarnProperty('hadoop.registry.rm.enabled', 'true')
     else:
       putYarnProperty('hadoop.registry.rm.enabled', 'false')
+    # recommend enabling RM and NM recovery if ZOOKEEPER in services
+    if "ZOOKEEPER" in servicesList:
+      putYarnProperty('yarn.resourcemanager.recovery.enabled', 'true')
+      putYarnProperty('yarn.nodemanager.recovery.enabled', 'true')
+    else:
+      putYarnProperty('yarn.resourcemanager.recovery.enabled', 'false')
+      putYarnProperty('yarn.nodemanager.recovery.enabled', 'false')
+      # recommend disabling RM HA if ZOOKEEPER is not in services
+      putYarnProperty('yarn.resourcemanager.ha.enabled', 'false')
 
   def recommendYARNConfigurationsFromHDP23(self, configurations, clusterData, services, hosts):
     putYarnSiteProperty = self.putProperty(configurations, "yarn-site", services)
@@ -1795,6 +1804,7 @@ class YARNValidator(service_advisor.ServiceAdvisor):
     self.as_super.__init__(*args, **kwargs)
 
     self.validators = [("yarn-site", self.validateYARNSiteConfigurationsFromHDP206),
+                       ("yarn-site", self.validateYARNSiteConfigurationsFromHDP22),
                        ("yarn-site", self.validateYARNSiteConfigurationsFromHDP25),
                        ("yarn-site" , self.validateYARNSiteConfigurationsFromHDP26),
                        ("yarn-env", self.validateYARNEnvConfigurationsFromHDP206),
@@ -1837,6 +1847,45 @@ class YARNValidator(service_advisor.ServiceAdvisor):
                         {"config-name": 'yarn.scheduler.maximum-allocation-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.scheduler.maximum-allocation-mb')} ]
     return self.toConfigurationValidationProblems(validationItems, "yarn-site")
 
+  def validateYARNSiteConfigurationsFromHDP22(self, properties, recommendedDefaults, configurations, services, hosts):
+    """
+    This was copied from HDP 2.2; validate yarn-site
+    :return: A list of configuration validation problems.
+    """
+    yarn_site = properties
+    validationItems = []
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+
+    zk_hosts = self.getHostsForComponent(services, "ZOOKEEPER", "ZOOKEEPER_SERVER")
+    if len(zk_hosts) == 0:
+      # ZOOKEEPER_SERVER isn't assigned to at least one host
+      if 'yarn.resourcemanager.recovery.enabled' in yarn_site and \
+              'true' == yarn_site['yarn.resourcemanager.recovery.enabled']:
+        validationItems.append({"config-name": "yarn.resourcemanager.recovery.enabled",
+                                "item": self.getWarnItem(
+                                  "YARN resource manager recovery can only be enabled if ZOOKEEPER is installed.")})
+      if 'yarn.nodemanager.recovery.enabled' in yarn_site and \
+              'true' == yarn_site['yarn.nodemanager.recovery.enabled']:
+        validationItems.append({"config-name": "yarn.nodemanager.recovery.enabled",
+                                "item": self.getWarnItem(
+                                  "YARN node manager recovery can only be enabled if ZOOKEEPER is installed.")})
+
+    if len(zk_hosts) < 3:
+      if 'yarn.resourcemanager.ha.enabled' in yarn_site and \
+              'true' == yarn_site['yarn.resourcemanager.ha.enabled']:
+        validationItems.append({"config-name": "yarn.resourcemanager.ha.enabled",
+                                "item": self.getWarnItem(
+                                  "You must have at least 3 ZooKeeper Servers in your cluster to enable ResourceManager HA.")})
+
+    if 'ZOOKEEPER' not in servicesList or 'SLIDER' not in servicesList:
+      if 'hadoop.registry.rm.enabled' in yarn_site and \
+              'true' == yarn_site['hadoop.registry.rm.enabled']:
+        validationItems.append({"config-name": "hadoop.registry.rm.enabled",
+                                "item": self.getWarnItem(
+                                  "HADOOP resource manager registry can only be enabled if ZOOKEEPER and SLIDER are installed.")})
+
+    return self.toConfigurationValidationProblems(validationItems, "yarn-site")
+
   def validateYARNSiteConfigurationsFromHDP25(self, properties, recommendedDefaults, configurations, services, hosts):
     yarn_site_properties = self.getSiteProperties(configurations, "yarn-site")
     validationItems = []

http://git-wip-us.apache.org/repos/asf/ambari/blob/a2464b90/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
index 726514b..54ddd89 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
@@ -174,12 +174,23 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
           putYarnPropertyAttribute('yarn.nodemanager.linux-container-executor.cgroups.hierarchy', 'delete', 'true')
           putYarnPropertyAttribute('yarn.nodemanager.linux-container-executor.cgroups.mount', 'delete', 'true')
           putYarnPropertyAttribute('yarn.nodemanager.linux-container-executor.cgroups.mount-path', 'delete', 'true')
-    # recommend hadoop.registry.rm.enabled based on SLIDER in services
+    # recommend hadoop.registry.rm.enabled based on SLIDER and ZOOKEEPER in services
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
-    if "SLIDER" in servicesList:
+    if "SLIDER" in servicesList and "ZOOKEEPER" in servicesList:
       putYarnProperty('hadoop.registry.rm.enabled', 'true')
     else:
       putYarnProperty('hadoop.registry.rm.enabled', 'false')
+    # recommend enabling RM and NM recovery if ZOOKEEPER in services
+    if "ZOOKEEPER" in servicesList:
+      putYarnProperty('yarn.resourcemanager.recovery.enabled', 'true')
+      putYarnProperty('yarn.nodemanager.recovery.enabled', 'true')
+    else:
+      putYarnProperty('yarn.resourcemanager.recovery.enabled', 'false')
+      putYarnProperty('yarn.nodemanager.recovery.enabled', 'false')
+      # recommend disabling RM HA if ZOOKEEPER is not in services
+      putYarnProperty('yarn.resourcemanager.ha.enabled', 'false')
+
+
 
   def recommendHDFSConfigurations(self, configurations, clusterData, services, hosts):
     super(HDP22StackAdvisor, self).recommendHDFSConfigurations(configurations, clusterData, services, hosts)
@@ -1034,6 +1045,7 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
                "hadoop-env": self.validateHDFSConfigurationsEnv,
                "ranger-hdfs-plugin-properties": self.validateHDFSRangerPluginConfigurations},
       "YARN": {"yarn-env": self.validateYARNEnvConfigurations,
+               "yarn-site": self.validateYARNConfigurations,
                "ranger-yarn-plugin-properties": self.validateYARNRangerPluginConfigurations},
       "HIVE": {"hiveserver2-site": self.validateHiveServer2Configurations,
                "hive-site": self.validateHiveConfigurations,
@@ -1714,6 +1726,43 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
 
     return self.toConfigurationValidationProblems(validationItems, "ranger-storm-plugin-properties")
 
+  def validateYARNConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    parentValidationProblems = super(HDP22StackAdvisor, self).validateYARNConfigurations(properties, recommendedDefaults, configurations, services, hosts)
+    yarn_site = properties
+    validationItems = []
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    zk_hosts = self.getHostsForComponent(services, "ZOOKEEPER", "ZOOKEEPER_SERVER")
+    if len(zk_hosts) == 0:
+      # ZOOKEEPER_SERVER isn't assigned to at least one host
+      if 'yarn.resourcemanager.recovery.enabled' in yarn_site and \
+              'true' == yarn_site['yarn.resourcemanager.recovery.enabled']:
+        validationItems.append({"config-name": "yarn.resourcemanager.recovery.enabled",
+                                "item": self.getWarnItem(
+                                  "YARN resource manager recovery can only be enabled if ZOOKEEPER is installed.")})
+      if 'yarn.nodemanager.recovery.enabled' in yarn_site and \
+              'true' == yarn_site['yarn.nodemanager.recovery.enabled']:
+        validationItems.append({"config-name": "yarn.nodemanager.recovery.enabled",
+                                "item": self.getWarnItem(
+                                  "YARN node manager recovery can only be enabled if ZOOKEEPER is installed.")})
+
+    if len(zk_hosts) < 3:
+      if 'yarn.resourcemanager.ha.enabled' in yarn_site and \
+              'true' == yarn_site['yarn.resourcemanager.ha.enabled']:
+        validationItems.append({"config-name": "yarn.resourcemanager.ha.enabled",
+                                "item": self.getWarnItem(
+                                  "You must have at least 3 ZooKeeper Servers in your cluster to enable ResourceManager HA.")})
+
+    if 'ZOOKEEPER' not in servicesList or 'SLIDER' not in servicesList:
+      if 'hadoop.registry.rm.enabled' in yarn_site and \
+              'true' == yarn_site['hadoop.registry.rm.enabled']:
+        validationItems.append({"config-name": "hadoop.registry.rm.enabled",
+                                "item": self.getWarnItem(
+                                  "HADOOP resource manager registry can only be enabled if ZOOKEEPER and SLIDER are installed.")})
+
+    validationProblems = self.toConfigurationValidationProblems(validationItems, "yarn-site")
+    validationProblems.extend(parentValidationProblems)
+    return validationProblems
+
   def validateYARNEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
     parentValidationProblems = super(HDP22StackAdvisor, self).validateYARNEnvConfigurations(properties, recommendedDefaults, configurations, services, hosts)
     validationItems = []

http://git-wip-us.apache.org/repos/asf/ambari/blob/a2464b90/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
index 571ff26..ee620b5 100644
--- a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
@@ -906,7 +906,62 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.scheduler.maximum-allocation-vcores": "4",
           "yarn.scheduler.minimum-allocation-vcores": "1",
           "yarn.nodemanager.resource.cpu-vcores": "4",
-          "hadoop.registry.rm.enabled": "true"
+          "hadoop.registry.rm.enabled": "false",
+          "yarn.resourcemanager.recovery.enabled": "false",
+          "yarn.nodemanager.recovery.enabled": "false",
+          "yarn.resourcemanager.ha.enabled": "false"
+        }
+      }
+    }
+
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, None)
+    self.assertEquals(configurations, expected)
+
+  def test_recommendYARNConfigurationsWithZKAndSlider(self):
+    configurations = {}
+    services = {"configurations": configurations}
+    services['services'] = [
+      {
+        "StackServices": {
+          "service_name": "ZOOKEEPER"
+        },
+        },
+      {
+        "StackServices": {
+          "service_name": "YARN"
+        },
+        },
+      {
+        "StackServices": {
+          "service_name": "SLIDER"
+        },
+        }
+    ]
+    clusterData = {
+      "cpu": 4,
+      "containers" : 5,
+      "ramPerContainer": 256,
+      "yarnMinContainerSize": 256
+    }
+    expected = {
+      "yarn-env": {
+        "properties": {
+          "min_user_id": "500",
+          'service_check.queue.name': 'default'
+        }
+      },
+      "yarn-site": {
+        "properties": {
+          "yarn.nodemanager.linux-container-executor.group": "hadoop",
+          "yarn.nodemanager.resource.memory-mb": "1280",
+          "yarn.scheduler.minimum-allocation-mb": "256",
+          "yarn.scheduler.maximum-allocation-mb": "1280",
+          "yarn.scheduler.maximum-allocation-vcores": "4",
+          "yarn.scheduler.minimum-allocation-vcores": "1",
+          "yarn.nodemanager.resource.cpu-vcores": "4",
+          "hadoop.registry.rm.enabled": "true",
+          "yarn.resourcemanager.recovery.enabled": "true",
+          "yarn.nodemanager.recovery.enabled": "true"
         }
       }
     }
@@ -914,6 +969,55 @@ class TestHDP22StackAdvisor(TestCase):
     self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, None)
     self.assertEquals(configurations, expected)
 
+  def test_recommendYARNConfigurationsWithZK(self):
+    configurations = {}
+    services = {"configurations": configurations}
+    services['services'] = [
+      {
+        "StackServices": {
+          "service_name": "ZOOKEEPER"
+        },
+        },
+      {
+        "StackServices": {
+          "service_name": "YARN"
+        },
+        }
+    ]
+    clusterData = {
+      "cpu": 4,
+      "containers" : 5,
+      "ramPerContainer": 256,
+      "yarnMinContainerSize": 256
+    }
+    expected = {
+      "yarn-env": {
+        "properties": {
+          "min_user_id": "500",
+          'service_check.queue.name': 'default'
+        }
+      },
+      "yarn-site": {
+        "properties": {
+          "yarn.nodemanager.linux-container-executor.group": "hadoop",
+          "yarn.nodemanager.resource.memory-mb": "1280",
+          "yarn.scheduler.minimum-allocation-mb": "256",
+          "yarn.scheduler.maximum-allocation-mb": "1280",
+          "yarn.scheduler.maximum-allocation-vcores": "4",
+          "yarn.scheduler.minimum-allocation-vcores": "1",
+          "yarn.nodemanager.resource.cpu-vcores": "4",
+          "hadoop.registry.rm.enabled": "false",
+          "yarn.resourcemanager.recovery.enabled": "true",
+          "yarn.nodemanager.recovery.enabled": "true"
+        }
+      }
+    }
+
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, None)
+    self.assertEquals(configurations, expected)
+
+
+
   def test_recommendSPARKConfigurations(self):
     configurations = {}
     services = {"configurations": configurations}
@@ -979,7 +1083,10 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.scheduler.minimum-allocation-vcores": "1",
           "yarn.scheduler.maximum-allocation-mb": "1280",
           "yarn.nodemanager.resource.cpu-vcores": "2",
-          "hadoop.registry.rm.enabled": "false"
+          "hadoop.registry.rm.enabled": "false",
+          "yarn.resourcemanager.recovery.enabled": "false",
+          "yarn.nodemanager.recovery.enabled": "false",
+          "yarn.resourcemanager.ha.enabled": "false"
         },
         "property_attributes": {
           'yarn.nodemanager.resource.memory-mb': {'maximum': '1877'},
@@ -1806,7 +1913,10 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.scheduler.minimum-allocation-vcores": "1",
           "yarn.scheduler.maximum-allocation-mb": "1792",
           "yarn.nodemanager.resource.cpu-vcores": "1",
-          "hadoop.registry.rm.enabled": "false"
+          "hadoop.registry.rm.enabled": "false",
+          "yarn.resourcemanager.recovery.enabled": "false",
+          "yarn.nodemanager.recovery.enabled": "false",
+          "yarn.resourcemanager.ha.enabled": "false"
         },
         "property_attributes": {
           'yarn.nodemanager.resource.memory-mb': {'maximum': '1877'},
@@ -2070,7 +2180,10 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.scheduler.minimum-allocation-vcores": "1",
           "yarn.scheduler.maximum-allocation-mb": "1280",
           "yarn.nodemanager.resource.cpu-vcores": "1",
-          "hadoop.registry.rm.enabled": "false"
+          "hadoop.registry.rm.enabled": "false",
+          "yarn.resourcemanager.recovery.enabled": "false",
+          "yarn.nodemanager.recovery.enabled": "false",
+          "yarn.resourcemanager.ha.enabled": "false"
         },
         "property_attributes": {
           'yarn.nodemanager.resource.memory-mb': {'maximum': '1877'},
@@ -2285,7 +2398,10 @@ class TestHDP22StackAdvisor(TestCase):
                 "yarn.scheduler.minimum-allocation-vcores": "1",
                 "yarn.scheduler.maximum-allocation-mb": "1280",
                 "yarn.nodemanager.resource.cpu-vcores": "1",
-                "hadoop.registry.rm.enabled": "false"
+                "hadoop.registry.rm.enabled": "false",
+                "yarn.resourcemanager.recovery.enabled": "false",
+                "yarn.nodemanager.recovery.enabled": "false",
+                "yarn.resourcemanager.ha.enabled": "false"
             },
             "property_attributes": {
                 'yarn.nodemanager.resource.memory-mb': {'maximum': '1877'},
@@ -3844,6 +3960,9 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.scheduler.maximum-allocation-mb": "33792",
           "yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler",
           "hadoop.registry.rm.enabled": "false",
+          "yarn.resourcemanager.recovery.enabled": "false",
+          "yarn.nodemanager.recovery.enabled": "false",
+          "yarn.resourcemanager.ha.enabled": "false",
           "yarn.timeline-service.leveldb-state-store.path": "/hadoop/yarn/timeline",
           "yarn.timeline-service.leveldb-timeline-store.path": "/hadoop/yarn/timeline",
           "yarn.nodemanager.local-dirs": "/hadoop/yarn/local,/dev/shm/hadoop/yarn/local,/vagrant/hadoop/yarn/local",
@@ -3903,6 +4022,9 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.scheduler.maximum-allocation-mb": "33792",
           "yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler",
           "hadoop.registry.rm.enabled": "false",
+          "yarn.resourcemanager.recovery.enabled": "false",
+          "yarn.nodemanager.recovery.enabled": "false",
+          "yarn.resourcemanager.ha.enabled": "false",
           "yarn.timeline-service.leveldb-state-store.path": "/hadoop/yarn/timeline",
           "yarn.timeline-service.leveldb-timeline-store.path": "/hadoop/yarn/timeline",
           "yarn.nodemanager.local-dirs": "/hadoop/yarn/local,/dev/shm/hadoop/yarn/local,/vagrant/hadoop/yarn/local",

http://git-wip-us.apache.org/repos/asf/ambari/blob/a2464b90/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
index d4d28c9..96a595f 100644
--- a/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
@@ -1153,6 +1153,9 @@ class TestHDP26StackAdvisor(TestCase):
       'yarn-site': {
         'properties': {
           'hadoop.registry.rm.enabled': 'false',
+          'yarn.resourcemanager.recovery.enabled': 'false',
+          'yarn.nodemanager.recovery.enabled': 'false',
+          'yarn.resourcemanager.ha.enabled': 'false',
           'yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes': '',
           'yarn.scheduler.minimum-allocation-vcores': '1',
           'yarn.scheduler.maximum-allocation-vcores': '4',
@@ -1329,6 +1332,9 @@ class TestHDP26StackAdvisor(TestCase):
       'yarn-site': {
         'properties': {
           'hadoop.registry.rm.enabled': 'false',
+          'yarn.resourcemanager.recovery.enabled': 'false',
+          'yarn.nodemanager.recovery.enabled': 'false',
+          'yarn.resourcemanager.ha.enabled': 'false',
           'yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes': '',
           'yarn.authorization-provider': 'org.apache.ranger.authorization.yarn.authorizer.RangerYarnAuthorizer',
           'yarn.acl.enable': 'true',
@@ -1436,6 +1442,9 @@ class TestHDP26StackAdvisor(TestCase):
       'yarn-site': {
         'properties': {
           'hadoop.registry.rm.enabled': 'false',
+          'yarn.resourcemanager.recovery.enabled': 'false',
+          'yarn.nodemanager.recovery.enabled': 'false',
+          'yarn.resourcemanager.ha.enabled': 'false',
           'yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes': '',
           'yarn.authorization-provider': 'org.apache.ranger.authorization.yarn.authorizer.RangerYarnAuthorizer',
           'yarn.acl.enable': 'true',


[04/33] ambari git commit: AMBARI-21344. Add Services Using Repository ID (alexantonenko)

Posted by rl...@apache.org.
AMBARI-21344. Add Services Using Repository ID (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/08dd492e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/08dd492e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/08dd492e

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 08dd492e5fb801355be8e2dcd895d9cb09d3dd3c
Parents: 4522cf5
Author: Alex Antonenko <hi...@gmail.com>
Authored: Mon Jun 26 17:44:29 2017 +0300
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Tue Jun 27 11:05:14 2017 +0300

----------------------------------------------------------------------
 .../app/controllers/wizard/step8_controller.js      | 16 ++++++----------
 ambari-web/app/models/stack.js                      |  4 +++-
 2 files changed, 9 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/08dd492e/ambari-web/app/controllers/wizard/step8_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard/step8_controller.js b/ambari-web/app/controllers/wizard/step8_controller.js
index fa44a24..42519e0 100644
--- a/ambari-web/app/controllers/wizard/step8_controller.js
+++ b/ambari-web/app/controllers/wizard/step8_controller.js
@@ -913,6 +913,9 @@ App.WizardStep8Controller = Em.Controller.extend(App.AddSecurityConfigs, App.wiz
         installerController.postVersionDefinitionFileStep8(versionData.isXMLdata, versionData.data).done(function (versionInfo) {
           if (versionInfo.id && versionInfo.stackName && versionInfo.stackVersion) {
             var selectedStack = App.Stack.find().findProperty('isSelected', true);
+            if (selectedStack) {
+              selectedStack.set('versionInfoId', versionInfo.id);
+            }
             installerController.updateRepoOSInfo(versionInfo, selectedStack).done(function() {
               self._startDeploy();
             });
@@ -1011,20 +1014,13 @@ App.WizardStep8Controller = Em.Controller.extend(App.AddSecurityConfigs, App.wiz
    * @method createSelectedServicesData
    */
   createSelectedServicesData: function () {
-
-    var isInstaller = this.get('isInstaller')
     var selectedStack;
     if (this.get('isInstaller')) {
       selectedStack = App.Stack.find().findProperty('isSelected', true);
     }
-
-    return this.get('selectedServices').map(function (_service) {
-      if (selectedStack) {
-        return {"ServiceInfo": { "service_name": _service.get('serviceName'), "desired_repository_version": selectedStack.get('repositoryVersion') }};
-      } else {
-        return {"ServiceInfo": { "service_name": _service.get('serviceName') }};
-      }
-    });
+    return this.get('selectedServices').map(service => selectedStack ?
+      {"ServiceInfo": { "service_name": service.get('serviceName'), "desired_repository_version_id": selectedStack.get('versionInfoId') }} :
+      {"ServiceInfo": { "service_name": service.get('serviceName') }});
   },
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/08dd492e/ambari-web/app/models/stack.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/stack.js b/ambari-web/app/models/stack.js
index 47d1c44..657ee5c 100644
--- a/ambari-web/app/models/stack.js
+++ b/ambari-web/app/models/stack.js
@@ -34,6 +34,8 @@ App.Stack = DS.Model.extend({
   operatingSystems: DS.hasMany('App.OperatingSystem'),
   isSelected: DS.attr('boolean', {defaultValue: false}),
 
+  versionInfoId: null,
+
   stackNameVersion: Em.computed.concat('-', 'stackName', 'stackVersion'),
 
   isPatch: Em.computed.equal('type', 'PATCH'),
@@ -81,4 +83,4 @@ App.Stack = DS.Model.extend({
 });
 
 
-App.Stack.FIXTURES = [];
\ No newline at end of file
+App.Stack.FIXTURES = [];


[10/33] ambari git commit: AMBARI-21287. Cannot install Datanode/AppTimeLine server from ambari 3.0. Fix root mode sudo issue (dlysnichenko)

Posted by rl...@apache.org.
AMBARI-21287. Cannot install Datanode/AppTimeLine server from ambari 3.0. Fix root mode sudo issue (dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c1eeafbf
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c1eeafbf
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c1eeafbf

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: c1eeafbfebd089496153d5f3e3665e6a3302bd4a
Parents: ebd79e9
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Tue Jun 27 15:56:52 2017 +0300
Committer: Lisnichenko Dmitro <dl...@hortonworks.com>
Committed: Tue Jun 27 15:57:41 2017 +0300

----------------------------------------------------------------------
 .../libraries/functions/packages_analyzer.py         | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c1eeafbf/ambari-common/src/main/python/resource_management/libraries/functions/packages_analyzer.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/packages_analyzer.py b/ambari-common/src/main/python/resource_management/libraries/functions/packages_analyzer.py
index f4db3d2..5d67654 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/packages_analyzer.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/packages_analyzer.py
@@ -26,6 +26,7 @@ from threading import Thread
 import threading
 from ambari_commons import OSCheck, OSConst
 from ambari_commons import shell
+from ambari_commons.constants import AMBARI_SUDO_BINARY
 from resource_management.core.logger import Logger
 from resource_management.core import shell as rmf_shell
 from resource_management.core.exceptions import Fail
@@ -93,11 +94,11 @@ def allInstalledPackages(allInstalledPackages):
   """
   if OSCheck.is_suse_family():
     return _lookUpZypperPackages(
-      ["sudo", "zypper", "--no-gpg-checks", "search", "--installed-only", "--details"],
+      [AMBARI_SUDO_BINARY, "zypper", "--no-gpg-checks", "search", "--installed-only", "--details"],
       allInstalledPackages)
   elif OSCheck.is_redhat_family():
     return _lookUpYumPackages(
-      ["sudo", "yum", "list", "installed"],
+      [AMBARI_SUDO_BINARY, "yum", "list", "installed"],
       'Installed Packages',
       allInstalledPackages)
   elif OSCheck.is_ubuntu_family():
@@ -133,14 +134,14 @@ def get_available_packages_in_repos(repositories):
       available_packages_in_repos.append(package[0])
   elif OSCheck.is_suse_family():
     for repo in repo_ids:
-      _lookUpZypperPackages(["sudo", "zypper", "--no-gpg-checks", "search", "--details", "--repo", repo],
+      _lookUpZypperPackages([AMBARI_SUDO_BINARY, "zypper", "--no-gpg-checks", "search", "--details", "--repo", repo],
                             available_packages)
     available_packages_in_repos += [package[0] for package in available_packages]
   elif OSCheck.is_redhat_family():
     for repo in repo_ids:
-      _lookUpYumPackages(["sudo", "yum", "list", "available", "--disablerepo=*", "--enablerepo=" + repo],
+      _lookUpYumPackages([AMBARI_SUDO_BINARY, "yum", "list", "available", "--disablerepo=*", "--enablerepo=" + repo],
                          'Available Packages', available_packages)
-      _lookUpYumPackages(["sudo", "yum", "list", "installed", "--disablerepo=*", "--enablerepo=" + repo],
+      _lookUpYumPackages([AMBARI_SUDO_BINARY, "yum", "list", "installed", "--disablerepo=*", "--enablerepo=" + repo],
                          'Installed Packages', installed_packages)
     available_packages_in_repos += [package[0] for package in available_packages + installed_packages]
   return available_packages_in_repos
@@ -149,11 +150,11 @@ def get_available_packages_in_repos(repositories):
 def allAvailablePackages(allAvailablePackages):
   if OSCheck.is_suse_family():
     return _lookUpZypperPackages(
-      ["sudo", "zypper", "--no-gpg-checks", "search", "--uninstalled-only", "--details"],
+      [AMBARI_SUDO_BINARY, "zypper", "--no-gpg-checks", "search", "--uninstalled-only", "--details"],
       allAvailablePackages)
   elif OSCheck.is_redhat_family():
     return _lookUpYumPackages(
-      ["sudo", "yum", "list", "available"],
+      [AMBARI_SUDO_BINARY, "yum", "list", "available"],
       'Available Packages',
       allAvailablePackages)
   elif OSCheck.is_ubuntu_family():


[02/33] ambari git commit: AMBARI-21334 Ability to disable Container metrics in AMS (dsen)

Posted by rl...@apache.org.
AMBARI-21334 Ability to disable Container metrics in AMS (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6eaabc12
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6eaabc12
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6eaabc12

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 6eaabc120a2604b3e15c41f77dd88da52b3f6dd0
Parents: a2464b9
Author: Dmytro Sen <ds...@apache.org>
Authored: Mon Jun 26 19:52:14 2017 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Mon Jun 26 19:52:14 2017 +0300

----------------------------------------------------------------------
 .../metrics/timeline/HBaseTimelineMetricStore.java       |  9 ++++++++-
 .../metrics/timeline/TimelineMetricConfiguration.java    | 11 +++++++++++
 .../metrics/timeline/TestTimelineMetricStore.java        |  1 +
 3 files changed, 20 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/6eaabc12/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
index 12c27a4..ad1fd67 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
@@ -82,6 +82,7 @@ public class HBaseTimelineMetricStore extends AbstractService implements Timelin
   private TimelineMetricMetadataManager metricMetadataManager;
   private Integer defaultTopNHostsLimit;
   private MetricCollectorHAController haController;
+  private boolean containerMetricsDisabled = false;
 
   /**
    * Construct the service.
@@ -188,7 +189,7 @@ public class HBaseTimelineMetricStore extends AbstractService implements Timelin
         LOG.info("Started watchdog for timeline metrics store with initial " +
           "delay = " + initDelay + ", delay = " + delay);
       }
-
+      containerMetricsDisabled = configuration.isContainerMetricsDisabled();
       isInitialized = true;
     }
 
@@ -363,6 +364,12 @@ public class HBaseTimelineMetricStore extends AbstractService implements Timelin
   @Override
   public TimelinePutResponse putContainerMetrics(List<ContainerMetric> metrics)
       throws SQLException, IOException {
+
+    if (containerMetricsDisabled) {
+      LOG.debug("Ignoring submitted container metrics according to configuration. Values will not be stored.");
+      return new TimelinePutResponse();
+    }
+
     hBaseAccessor.insertContainerMetrics(metrics);
     return new TimelinePutResponse();
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/6eaabc12/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
index 006a403..44073ab 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
@@ -191,6 +191,9 @@ public class TimelineMetricConfiguration {
   public static final String TIMELINE_SERVICE_RPC_ADDRESS =
     "timeline.metrics.service.rpc.address";
 
+  public static final String TIMELINE_SERVICE_DISABLE_CONTAINER_METRICS =
+    "timeline.metrics.service.container.metrics.disabled";
+
   public static final String CLUSTER_AGGREGATOR_APP_IDS =
     "timeline.metrics.service.cluster.aggregator.appIds";
 
@@ -507,4 +510,12 @@ public class TimelineMetricConfiguration {
 
     return whitelist;
   }
+
+  public boolean isContainerMetricsDisabled() {
+    try {
+      return metricsConf != null && Boolean.parseBoolean(metricsConf.get(TIMELINE_SERVICE_DISABLE_CONTAINER_METRICS, "false"));
+    } catch (Exception e) {
+      return false;
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/6eaabc12/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java
index ac2f9d7..8abcd83 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java
@@ -111,4 +111,5 @@ public class TestTimelineMetricStore implements TimelineMetricStore {
   public List<String> getLiveInstances() {
     return Collections.emptyList();
   }
+  
 }


[27/33] ambari git commit: AMBARI-21343. Cleanup relevant Kerberos identities when a component is removed - addendum: fix missing copyrights (amagyar)

Posted by rl...@apache.org.
AMBARI-21343. Cleanup relevant Kerberos identities when a component is removed - addendum: fix missing copyrights (amagyar)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/09e5d41c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/09e5d41c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/09e5d41c

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 09e5d41c5d52339a0edea97886acf0fd38ee8a91
Parents: 8b5c7db
Author: Attila Magyar <am...@hortonworks.com>
Authored: Thu Jun 29 12:14:54 2017 +0200
Committer: Attila Magyar <am...@hortonworks.com>
Committed: Thu Jun 29 12:14:54 2017 +0200

----------------------------------------------------------------------
 .../controller/OrderedRequestStageContainer.java   | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/09e5d41c/ambari-server/src/main/java/org/apache/ambari/server/controller/OrderedRequestStageContainer.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/OrderedRequestStageContainer.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/OrderedRequestStageContainer.java
index 6d8b5a3..4ac6896 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/OrderedRequestStageContainer.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/OrderedRequestStageContainer.java
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.ambari.server.controller;
 
 import org.apache.ambari.server.AmbariException;


[11/33] ambari git commit: Updated team page. (yusaku)

Posted by rl...@apache.org.
Updated team page. (yusaku)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/3529d053
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/3529d053
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/3529d053

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 3529d053dae7381f098d6998280ab7f152e86843
Parents: c1eeafb
Author: Yusaku Sako <yu...@hortonworks.com>
Authored: Tue Jun 27 10:19:26 2017 -0700
Committer: Yusaku Sako <yu...@hortonworks.com>
Committed: Tue Jun 27 10:19:26 2017 -0700

----------------------------------------------------------------------
 docs/pom.xml | 12 ++++++++++++
 1 file changed, 12 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/3529d053/docs/pom.xml
----------------------------------------------------------------------
diff --git a/docs/pom.xml b/docs/pom.xml
index a862796..390ade4 100644
--- a/docs/pom.xml
+++ b/docs/pom.xml
@@ -234,6 +234,18 @@
             </organization>
         </developer>
         <developer>
+            <id>amagyar</id>
+            <name>Attila Magyar</name>
+            <email>amagyar@apache.org</email>
+            <timezone>+1</timezone>
+            <roles>
+                <role>Committer</role>
+            </roles>
+            <organization>
+                Hortonworks
+            </organization>
+        </developer>
+        <developer>
             <id>avijayan</id>
             <name>Aravindan Vijayan</name>
             <email>avijayan@apache.org</email>


[14/33] ambari git commit: AMBARI-21327 Ambari server to print error messages if NN HA namenode services properties use diff FQDN (dual network cards) than FQDN in the HostComponentState table (dili)

Posted by rl...@apache.org.
AMBARI-21327 Ambari server to print error messages if NN HA namenode services properties use diff FQDN (dual network cards) than FQDN in the HostComponentState table (dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2f402505
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2f402505
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2f402505

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 2f402505060354e8c71d24879c47a3850cc04009
Parents: 40e6352
Author: Di Li <di...@apache.org>
Authored: Tue Jun 27 15:56:53 2017 -0400
Committer: Di Li <di...@apache.org>
Committed: Tue Jun 27 15:56:53 2017 -0400

----------------------------------------------------------------------
 .../apache/ambari/server/stack/MasterHostResolver.java   | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/2f402505/ambari-server/src/main/java/org/apache/ambari/server/stack/MasterHostResolver.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/MasterHostResolver.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/MasterHostResolver.java
index 427a5f5..fc657c1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/MasterHostResolver.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/MasterHostResolver.java
@@ -132,7 +132,7 @@ public class MasterHostResolver {
               return filterHosts(hostsType, serviceName, componentName);
             }
 
-            Map<Status, String> pair = getNameNodePair();
+            Map<Status, String> pair = getNameNodePair(componentHosts);
             if (pair != null) {
               hostsType.master = pair.containsKey(Status.ACTIVE) ? pair.get(Status.ACTIVE) :  null;
               hostsType.secondary = pair.containsKey(Status.STANDBY) ? pair.get(Status.STANDBY) :  null;
@@ -273,7 +273,7 @@ public class MasterHostResolver {
    * one active and one standby host were found, otherwise, return null.
    * The hostnames are returned in lowercase.
    */
-  private Map<Status, String> getNameNodePair() {
+  private Map<Status, String> getNameNodePair(Set<String> componentHosts) throws AmbariException {
     Map<Status, String> stateToHost = new HashMap<>();
     Cluster cluster = getCluster();
 
@@ -307,6 +307,13 @@ public class MasterHostResolver {
           throw new MalformedURLException("Could not parse host and port from " + value);
         }
 
+        if (!componentHosts.contains(hp.host)){
+          //This may happen when NN HA is configured on dual network card machines with public/private FQDNs.
+          LOG.error(
+              String.format(
+                  "Hadoop NameNode HA configuration {0} contains host {1} that does not exist in the NameNode hosts list {3}",
+                  key, hp.host, componentHosts.toString()));
+        }
         String state = queryJmxBeanValue(hp.host, hp.port, "Hadoop:service=NameNode,name=NameNodeStatus", "State", true, encrypted);
 
         if (null != state && (state.equalsIgnoreCase(Status.ACTIVE.toString()) || state.equalsIgnoreCase(Status.STANDBY.toString()))) {


[18/33] ambari git commit: AMBARI-21256 : As part of START_ALL Ranger kms starts after hbase and hive causing their start failure (Vishal Suvagia via mugdha)

Posted by rl...@apache.org.
AMBARI-21256 : As part of START_ALL Ranger kms starts after hbase and hive causing their start failure (Vishal Suvagia via mugdha)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5e50042a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5e50042a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5e50042a

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 5e50042a78e4ddcb8e50bb28289ba67c50fb502c
Parents: 4c1ea4c
Author: Vishal Suvagia <vi...@yahoo.com>
Authored: Thu Jun 15 15:27:47 2017 +0530
Committer: Mugdha Varadkar <mu...@apache.org>
Committed: Wed Jun 28 14:00:01 2017 +0530

----------------------------------------------------------------------
 .../common-services/HBASE/0.96.0.2.0/role_command_order.json      | 3 +--
 .../common-services/HBASE/2.0.0.3.0/role_command_order.json       | 2 +-
 2 files changed, 2 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/5e50042a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/role_command_order.json b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/role_command_order.json
index 110b179..58d0c1c 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/role_command_order.json
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/role_command_order.json
@@ -4,7 +4,6 @@
     "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
     "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
     "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"],
-    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START", "ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START"]
-
+    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START", "ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START", "RANGER_KMS_SERVER-START"]
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/5e50042a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json
index 44d0c61..69f4bf6 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json
@@ -4,7 +4,7 @@
     "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
     "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
     "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"],
-    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START", "ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START"],
+    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START", "ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START", "RANGER_KMS_SERVER-START"],
     "PHOENIX_QUERY_SERVER-START": ["HBASE_MASTER-START"]
   }
 }


[31/33] ambari git commit: AMBARI-21376. Using URI naming format in dfs.datanode.data.dir causing datanodes startup failure (aonishuk)

Posted by rl...@apache.org.
AMBARI-21376. Using URI naming format in dfs.datanode.data.dir causing datanodes startup failure  (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7554509f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7554509f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7554509f

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 7554509f5ab1ddf262746ddab3c74f88c9f8154a
Parents: aa7a8c6
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Thu Jun 29 20:44:11 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Thu Jun 29 20:44:11 2017 +0300

----------------------------------------------------------------------
 .../resource_management/libraries/functions/mounted_dirs_helper.py  | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7554509f/ambari-common/src/main/python/resource_management/libraries/functions/mounted_dirs_helper.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/mounted_dirs_helper.py b/ambari-common/src/main/python/resource_management/libraries/functions/mounted_dirs_helper.py
index 0ebd7e2..712eacf 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/mounted_dirs_helper.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/mounted_dirs_helper.py
@@ -115,6 +115,7 @@ def handle_mounted_dirs(func, dirs_string, history_filename, update_cache=True):
   dirs_unmounted = set()         # set of dirs that have become unmounted
   valid_existing_dirs = []
 
+  dirs_string = dirs_string.replace("file:///","/")
   dirs_string = ",".join([re.sub(r'^\[.+\]', '', dfs_dir.strip()) for dfs_dir in dirs_string.split(",")])
   for dir in dirs_string.split(","):
     if dir is None or dir.strip() == "":


[28/33] ambari git commit: AMBARI-21363 ORA-00911 error during Ambari server schema upgrade due to incorrect syntax of Update statement (dgrinenko)

Posted by rl...@apache.org.
AMBARI-21363 ORA-00911 error during Ambari server schema upgrade due to incorrect syntax of Update statement (dgrinenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4cd31501
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4cd31501
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4cd31501

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 4cd3150111560a43dd8028e6f14b2abf753e3d8b
Parents: 09e5d41
Author: Dmytro Grinenko <ha...@apache.org>
Authored: Thu Jun 29 15:17:29 2017 +0300
Committer: Dmytro Grinenko <ha...@apache.org>
Committed: Thu Jun 29 15:17:29 2017 +0300

----------------------------------------------------------------------
 .../ambari/server/orm/DBAccessorImpl.java       | 43 +++++++++++---------
 .../orm/helpers/dbms/GenericDbmsHelper.java     |  2 +-
 2 files changed, 25 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/4cd31501/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
index 83ea8e1..a88430b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
@@ -204,6 +204,27 @@ public class DBAccessorImpl implements DBAccessor {
     return objectName;
   }
 
+  /**
+   * Setting arguments for prepared statement
+   *
+   * @param preparedStatement {@link PreparedStatement} object
+   * @param arguments array of arguments
+   *
+   * @throws SQLException
+   */
+  private void setArgumentsForPreparedStatement(PreparedStatement preparedStatement, Object[] arguments) throws SQLException{
+    for (int i = 0; i < arguments.length; i++) {
+      if (arguments[i] instanceof byte[]) {
+        byte[] binaryData = (byte[]) arguments[i];
+
+        // JDBC drivers supports only this function signature
+        preparedStatement.setBinaryStream(i+1, new ByteArrayInputStream(binaryData), binaryData.length);
+      } else {
+        preparedStatement.setObject(i+1, arguments[i]);
+      }
+    }
+  }
+
   @Override
   public boolean tableExists(String tableName) throws SQLException {
     boolean result = false;
@@ -878,16 +899,8 @@ public class DBAccessorImpl implements DBAccessor {
     LOG.info("Executing prepared query: {}", query);
 
     PreparedStatement preparedStatement = getConnection().prepareStatement(query);
+    setArgumentsForPreparedStatement(preparedStatement, arguments);
 
-      for (int i = 0; i < arguments.length; i++) {
-        if (arguments[i] instanceof byte[]) {
-          byte[] binaryData = (byte[]) arguments[i];
-          // JDBC drivers supports only this function signature
-          preparedStatement.setBinaryStream(i+1, new ByteArrayInputStream(binaryData), binaryData.length);
-        } else {
-          preparedStatement.setObject(i+1, arguments[i]);
-        }
-      }
     try {
         preparedStatement.execute();
     } catch (SQLException e) {
@@ -908,7 +921,7 @@ public class DBAccessorImpl implements DBAccessor {
    {@inheritDoc}
    */
   public void executePreparedUpdate(String query, Object...arguments) throws SQLException {
-    executePreparedQuery(query, false, arguments);
+    executePreparedUpdate(query, false, arguments);
   }
 
   /**
@@ -918,16 +931,8 @@ public class DBAccessorImpl implements DBAccessor {
     LOG.info("Executing prepared query: {}", query);
 
     PreparedStatement preparedStatement = getConnection().prepareStatement(query);
+    setArgumentsForPreparedStatement(preparedStatement, arguments);
 
-    for (int i = 0; i <= arguments.length; i++) {
-      if (arguments[i] instanceof byte[]) {
-        byte[] binaryData = (byte[]) arguments[i];
-        // JDBC drivers supports only this function signature
-        preparedStatement.setBinaryStream(i+1, new ByteArrayInputStream(binaryData), binaryData.length);
-      } else {
-        preparedStatement.setObject(i+1, arguments[i]);
-      }
-    }
     try {
       preparedStatement.executeUpdate();
     } catch (SQLException e) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/4cd31501/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/GenericDbmsHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/GenericDbmsHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/GenericDbmsHelper.java
index 56274c5..e2a1f38 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/GenericDbmsHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/GenericDbmsHelper.java
@@ -280,7 +280,7 @@ public class GenericDbmsHelper implements DbmsHelper {
    */
   @Override
   public String getColumnUpdateStatementWhereColumnIsNull(String tableName, String setColumnName, String conditionColumnName){
-    return "UPDATE " + tableName + " SET " + setColumnName + "=? WHERE " + conditionColumnName + " IS NULL;";
+    return "UPDATE " + tableName + " SET " + setColumnName + "=? WHERE " + conditionColumnName + " IS NULL";
   }
 
   /**


[12/33] ambari git commit: AMBARI-21172 - Delete view privileges from the Groups page (Anita Jebaraj via sangeetar)

Posted by rl...@apache.org.
AMBARI-21172 - Delete view privileges from the Groups page (Anita Jebaraj via sangeetar)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/86347182
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/86347182
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/86347182

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 86347182a99209dcd767240ae475a03549acd989
Parents: 3529d05
Author: Sangeeta Ravindran <sa...@apache.org>
Authored: Tue Jun 27 10:49:43 2017 -0700
Committer: Sangeeta Ravindran <sa...@apache.org>
Committed: Tue Jun 27 10:49:43 2017 -0700

----------------------------------------------------------------------
 .../scripts/controllers/groups/GroupsEditCtrl.js | 19 +++++++++++++++++--
 .../ui/admin-web/app/views/groups/edit.html      |  3 +++
 2 files changed, 20 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/86347182/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/groups/GroupsEditCtrl.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/groups/GroupsEditCtrl.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/groups/GroupsEditCtrl.js
index 92406e9..21d0fd6 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/groups/GroupsEditCtrl.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/groups/GroupsEditCtrl.js
@@ -129,6 +129,20 @@ angular.module('ambariAdminConsole')
     });
   };
 
+
+  $scope.removePrivilege = function(name, privilege) {
+    var privilegeObject = {
+        id: privilege.privilege_id,
+        view_name: privilege.view_name,
+        version: privilege.version,
+        instance_name: name
+    };
+    View.deletePrivilege(privilegeObject).then(function() {
+      loadPrivileges();
+    });
+  };
+
+function loadPrivileges() {
   // Load privileges
   Group.getPrivileges($routeParams.id).then(function(data) {
     var privileges = {
@@ -145,6 +159,7 @@ angular.module('ambariAdminConsole')
         privileges.views[privilege.instance_name] = privileges.views[privilege.instance_name] || { privileges:[]};
         privileges.views[privilege.instance_name].version = privilege.version;
         privileges.views[privilege.instance_name].view_name = privilege.view_name;
+        privileges.views[privilege.instance_name].privilege_id = privilege.privilege_id;
         privileges.views[privilege.instance_name].privileges.push(privilege.permission_label);
       }
     });
@@ -157,6 +172,6 @@ angular.module('ambariAdminConsole')
   }).catch(function(data) {
     Alert.error($t('common.alerts.cannotLoadPrivileges'), data.data.message);
   });
-
-
+}
+loadPrivileges();
 }]);

http://git-wip-us.apache.org/repos/asf/ambari/blob/86347182/ambari-admin/src/main/resources/ui/admin-web/app/views/groups/edit.html
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/views/groups/edit.html b/ambari-admin/src/main/resources/ui/admin-web/app/views/groups/edit.html
index e472ede..1aafd03 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/views/groups/edit.html
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/views/groups/edit.html
@@ -83,6 +83,9 @@
               <td>
                 <span tooltip="{{item}}" ng-repeat="item in privilege.privileges">{{item | translate}}{{$last ? '' : ', '}}</span>
               </td>
+              <td>
+                <i class="fa fa-trash-o" aria-hidden="true" ng-click="removePrivilege(name, privilege);"></i>
+              </td>
             </tr>
             <tr>
               <td ng-show="noViewPriv">{{'common.alerts.noPrivileges' | translate: '{term: constants.view}'}}</td>


[05/33] ambari git commit: AMBARI-21347. Service Page: Some Alerts are missing their Status (magyari_sandor)

Posted by rl...@apache.org.
AMBARI-21347. Service Page: Some Alerts are missing their Status (magyari_sandor)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9833bc18
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9833bc18
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9833bc18

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 9833bc182d9b44a69bb766de77311d4a3a50fa5e
Parents: 08dd492
Author: Sandor Magyari <sm...@hortonworks.com>
Authored: Mon Jun 26 20:01:59 2017 +0200
Committer: Sandor Magyari <sm...@hortonworks.com>
Committed: Tue Jun 27 11:15:03 2017 +0200

----------------------------------------------------------------------
 .../controller/AmbariManagementControllerImpl.java      | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9833bc18/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index ed707e7..6781f65 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -182,6 +182,7 @@ import org.apache.ambari.server.state.StackInfo;
 import org.apache.ambari.server.state.State;
 import org.apache.ambari.server.state.UnlimitedKeyJCERequirement;
 import org.apache.ambari.server.state.configgroup.ConfigGroupFactory;
+import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
 import org.apache.ambari.server.state.quicklinksprofile.QuickLinkVisibilityController;
 import org.apache.ambari.server.state.quicklinksprofile.QuickLinkVisibilityControllerFactory;
 import org.apache.ambari.server.state.quicklinksprofile.QuickLinksProfile;
@@ -193,6 +194,7 @@ import org.apache.ambari.server.state.stack.WidgetLayout;
 import org.apache.ambari.server.state.stack.WidgetLayoutInfo;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostInstallEvent;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostOpInProgressEvent;
+import org.apache.ambari.server.state.svccomphost.ServiceComponentHostOpSucceededEvent;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStartEvent;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStopEvent;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostUpgradeEvent;
@@ -3023,7 +3025,15 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
             // START task should run configuration script.
             if (newState == State.INSTALLED && skipInstallTaskForComponent(requestProperties, cluster, scHost)) {
               LOG.info("Skipping create of INSTALL task for {} on {}.", scHost.getServiceComponentName(), scHost.getHostName());
-              scHost.setState(State.INSTALLED);
+              // set state to INSTALLING, then immediately send an ServiceComponentHostOpSucceededEvent to allow
+              // transitioning from INSTALLING --> INSTALLED.
+              scHost.setState(State.INSTALLING);
+              long now = System.currentTimeMillis();
+              try {
+                scHost.handleEvent(new ServiceComponentHostOpSucceededEvent(scHost.getServiceComponentName(), scHost.getHostName(), now));
+              } catch (InvalidStateTransitionException e) {
+                LOG.error("Error transitioning ServiceComponentHost state to INSTALLED", e);
+              }
             } else {
               // !!! can never be null
               RepositoryVersionEntity repoVersion = serviceComponent.getDesiredRepositoryVersion();


[16/33] ambari git commit: BUG-82124 : As part of START_ALL Ranger kms starts after hbase and hive causing their start failure (Vishal Suvagia via mugdha)

Posted by rl...@apache.org.
BUG-82124 : As part of START_ALL Ranger kms starts after hbase and hive causing their start failure (Vishal Suvagia via mugdha)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/39efba35
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/39efba35
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/39efba35

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 39efba35980642b832f79c6afb332716045d859f
Parents: 9aa786f
Author: Vishal Suvagia <vi...@yahoo.com>
Authored: Thu Jun 15 15:27:47 2017 +0530
Committer: Mugdha Varadkar <mu...@apache.org>
Committed: Wed Jun 28 13:32:37 2017 +0530

----------------------------------------------------------------------
 .../common-services/HBASE/0.96.0.2.0/role_command_order.json      | 3 +--
 .../common-services/HBASE/2.0.0.3.0/role_command_order.json       | 2 +-
 2 files changed, 2 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/39efba35/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/role_command_order.json b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/role_command_order.json
index 110b179..58d0c1c 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/role_command_order.json
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/role_command_order.json
@@ -4,7 +4,6 @@
     "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
     "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
     "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"],
-    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START", "ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START"]
-
+    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START", "ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START", "RANGER_KMS_SERVER-START"]
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/39efba35/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json
index 44d0c61..69f4bf6 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json
@@ -4,7 +4,7 @@
     "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
     "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
     "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"],
-    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START", "ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START"],
+    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START", "ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START", "RANGER_KMS_SERVER-START"],
     "PHOENIX_QUERY_SERVER-START": ["HBASE_MASTER-START"]
   }
 }


[15/33] ambari git commit: AMBARI-21154: Add JAAS config properties for Atlas Hive hook in HiveCli to use kerberos ticket-cache.

Posted by rl...@apache.org.
AMBARI-21154: Add JAAS config properties for Atlas Hive hook in HiveCli to use kerberos ticket-cache.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9aa786f7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9aa786f7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9aa786f7

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 9aa786f7ea4c21159e6a014b4cbb6a6de155b22c
Parents: 2f40250
Author: Vishal Suvagia <vi...@yahoo.com>
Authored: Fri Jun 23 17:41:50 2017 +0530
Committer: Mugdha Varadkar <mu...@apache.org>
Committed: Wed Jun 28 11:32:26 2017 +0530

----------------------------------------------------------------------
 .../HIVE/2.1.0.3.0/service_advisor.py           |  19 +++
 .../stacks/HDP/2.5/upgrades/config-upgrade.xml  |  10 +-
 .../HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml |   7 +-
 .../stacks/HDP/2.5/upgrades/upgrade-2.6.xml     |   5 +-
 .../stacks/HDP/2.6/services/stack_advisor.py    |  21 ++++
 .../stacks/HDP/2.6/upgrades/config-upgrade.xml  |   9 ++
 .../HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml |   6 +
 .../stacks/HDP/2.6/upgrades/upgrade-2.6.xml     |   1 +
 .../stacks/2.6/common/test_stack_advisor.py     | 123 ++++++++++++++-----
 9 files changed, 165 insertions(+), 36 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9aa786f7/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/service_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/service_advisor.py b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/service_advisor.py
index 6d3e13d..48058f7 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/service_advisor.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/service_advisor.py
@@ -683,6 +683,25 @@ class HiveRecommender(service_advisor.ServiceAdvisor):
     else:
       self.logger.info("Not setting Hive Repo user for Ranger.")
 
+    security_enabled = self.isSecurityEnabled(services)
+    enable_atlas_hook = False
+
+    if 'hive-env' in configurations and 'hive.atlas.hook' in configurations['hive-env']['properties']:
+      enable_atlas_hook = configurations['hive-env']['properties']['hive.atlas.hook'].lower() == 'true'
+    elif 'hive-env' in services['configurations'] and 'hive.atlas.hook' in services['configurations']['hive-env']['properties']:
+      enable_atlas_hook = services['configurations']['hive-env']['properties']['hive.atlas.hook'].lower() == 'true'
+
+    if 'hive-atlas-application.properties' in services['configurations']:
+      putHiveAtlasHookProperty = self.putProperty(configurations, "hive-atlas-application.properties", services)
+      putHiveAtlasHookPropertyAttribute = self.putPropertyAttribute(configurations,"hive-atlas-application.properties")
+      if security_enabled and enable_atlas_hook:
+        putHiveAtlasHookProperty('atlas.jaas.ticketBased-KafkaClient.loginModuleControlFlag', 'required')
+        putHiveAtlasHookProperty('atlas.jaas.ticketBased-KafkaClient.loginModuleName', 'com.sun.security.auth.module.Krb5LoginModule')
+        putHiveAtlasHookProperty('atlas.jaas.ticketBased-KafkaClient.option.useTicketCache', 'true')
+      else:
+        putHiveAtlasHookPropertyAttribute('atlas.jaas.ticketBased-KafkaClient.loginModuleControlFlag', 'delete', 'true')
+        putHiveAtlasHookPropertyAttribute('atlas.jaas.ticketBased-KafkaClient.loginModuleName', 'delete', 'true')
+        putHiveAtlasHookPropertyAttribute('atlas.jaas.ticketBased-KafkaClient.option.useTicketCache', 'delete', 'true')
 
   def getDBDriver(self, databaseType):
     driverDict = {

http://git-wip-us.apache.org/repos/asf/ambari/blob/9aa786f7/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
index a29f74b..30796cc 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
@@ -434,7 +434,15 @@
             <regex-replace key="content" find="property.llap.daemon.log.maxfilesize = ([0-9]+)MB" replace-with="property.llap.daemon.log.maxfilesize = {{hive_llap_log_maxfilesize}}MB"/>
             <regex-replace key="content" find="property.llap.daemon.log.maxbackupindex = ([0-9]+)" replace-with="property.llap.daemon.log.maxbackupindex = {{hive_llap_log_maxbackupindex}}"/>
           </definition>
-
+          <definition xsi:type="configure" id="hdp_2_6_maint_jaas_config_for_hive_hook" summary="Updating hive atlas application properties">
+            <type>hive-atlas-application.properties</type>
+            <set key ="atlas.jaas.ticketBased-KafkaClient.loginModuleControlFlag" value="required"
+              if-type="cluster-env" if-key="security_enabled" if-value="true"/>
+            <set key ="atlas.jaas.ticketBased-KafkaClient.loginModuleName" value="com.sun.security.auth.module.Krb5LoginModule"
+              if-type="cluster-env" if-key="security_enabled" if-value="true"/>
+            <set key ="atlas.jaas.ticketBased-KafkaClient.option.useTicketCache" value="true"
+              if-type="cluster-env" if-key="security_enabled" if-value="true"/>
+          </definition>
 
           <definition xsi:type="configure" id="hdp_2_6_0_0_hive_set_hive_enforce_bucketing_property">
             <type>hive-site</type>

http://git-wip-us.apache.org/repos/asf/ambari/blob/9aa786f7/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
index 8c659ee..1f37389 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
@@ -621,7 +621,12 @@
           <summary>Removing atlas.cluster.name property</summary>
         </task>
       </execute-stage>
-      
+
+      <execute-stage service="HIVE" component="HIVE_SERVER" title="Updating hive atlas application properties">
+        <task xsi:type="configure" id="hdp_2_6_maint_jaas_config_for_hive_hook">
+          <summary>Updating hive atlas application properties</summary>
+        </task>
+      </execute-stage>
       <!-- SPARK -->
       <execute-stage service="SPARK" component="LIVY_SERVER" title="Apply config changes for Livy Server">
         <task xsi:type="configure" id="hdp_2_5_0_0_rename_spark_livy_configs"/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/9aa786f7/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
index 3054ca3..22c9a8d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
@@ -825,10 +825,11 @@
           <task xsi:type="configure" id="hdp_2_6_0_0_hive_set_hive_enforce_bucketing_property" />
           <task xsi:type="configure" id="hdp_2_6_0_0_copy_hive_tez_container_size_to_hiveInteractive" />
           <task xsi:type="configure" id="hdp_2_5_0_0_remove_atlas_cluster_name" />
+          <task xsi:type="configure" id="hdp_2_6_maint_jaas_config_for_hive_hook"/>
         </pre-upgrade>
-        
+
         <pre-downgrade />
-        
+
         <upgrade>
           <task xsi:type="restart-task" />
         </upgrade>

http://git-wip-us.apache.org/repos/asf/ambari/blob/9aa786f7/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
index f8bbca5..82656aa 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
@@ -547,6 +547,9 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
 
   def recommendHIVEConfigurations(self, configurations, clusterData, services, hosts):
     super(HDP26StackAdvisor, self).recommendHIVEConfigurations(configurations, clusterData, services, hosts)
+    putHiveAtlasHookProperty = self.putProperty(configurations, "hive-atlas-application.properties", services)
+    putHiveAtlasHookPropertyAttribute = self.putPropertyAttribute(configurations,"hive-atlas-application.properties")
+
     if 'hive-env' in services['configurations'] and 'hive_user' in services['configurations']['hive-env']['properties']:
       hive_user = services['configurations']['hive-env']['properties']['hive_user']
     else:
@@ -566,6 +569,24 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
     else:
       self.logger.info("Not setting Hive Repo user for Ranger.")
 
+    security_enabled = self.isSecurityEnabled(services)
+    enable_atlas_hook = False
+
+    if 'hive-env' in configurations and 'hive.atlas.hook' in configurations['hive-env']['properties']:
+      enable_atlas_hook = configurations['hive-env']['properties']['hive.atlas.hook'].lower() == 'true'
+    elif 'hive-env' in services['configurations'] and 'hive.atlas.hook' in services['configurations']['hive-env']['properties']:
+      enable_atlas_hook = services['configurations']['hive-env']['properties']['hive.atlas.hook'].lower() == 'true'
+
+    if 'hive-atlas-application.properties' in services['configurations']:
+      if security_enabled and enable_atlas_hook:
+        putHiveAtlasHookProperty('atlas.jaas.ticketBased-KafkaClient.loginModuleControlFlag', 'required')
+        putHiveAtlasHookProperty('atlas.jaas.ticketBased-KafkaClient.loginModuleName', 'com.sun.security.auth.module.Krb5LoginModule')
+        putHiveAtlasHookProperty('atlas.jaas.ticketBased-KafkaClient.option.useTicketCache', 'true')
+      else:
+        putHiveAtlasHookPropertyAttribute('atlas.jaas.ticketBased-KafkaClient.loginModuleControlFlag', 'delete', 'true')
+        putHiveAtlasHookPropertyAttribute('atlas.jaas.ticketBased-KafkaClient.loginModuleName', 'delete', 'true')
+        putHiveAtlasHookPropertyAttribute('atlas.jaas.ticketBased-KafkaClient.option.useTicketCache', 'delete', 'true')
+
   def recommendHBASEConfigurations(self, configurations, clusterData, services, hosts):
     super(HDP26StackAdvisor, self).recommendHBASEConfigurations(configurations, clusterData, services, hosts)
     if 'hbase-env' in services['configurations'] and 'hbase_user' in services['configurations']['hbase-env']['properties']:

http://git-wip-us.apache.org/repos/asf/ambari/blob/9aa786f7/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
index 1cbd78b..6dd2129 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
@@ -66,6 +66,15 @@
             <set key="ranger.plugin.hive.urlauth.filesystem.schemes" value="hdfs:,file:,wasb:,adl:"
               if-type="ranger-hive-security" if-key="ranger.plugin.hive.service.name" if-key-state="present"/>
           </definition>
+          <definition xsi:type="configure" id="hdp_2_6_maint_jaas_config_for_hive_hook" summary="Updating hive atlas application properties">
+            <type>hive-atlas-application.properties</type>
+            <set key ="atlas.jaas.ticketBased-KafkaClient.loginModuleControlFlag" value="required"
+              if-type="cluster-env" if-key="security_enabled" if-value="true"/>
+            <set key ="atlas.jaas.ticketBased-KafkaClient.loginModuleName" value="com.sun.security.auth.module.Krb5LoginModule"
+              if-type="cluster-env" if-key="security_enabled" if-value="true"/>
+            <set key ="atlas.jaas.ticketBased-KafkaClient.option.useTicketCache" value="true"
+              if-type="cluster-env" if-key="security_enabled" if-value="true"/>
+          </definition>
         </changes>
       </component>
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/9aa786f7/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
index ede267a..e262971 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
@@ -307,6 +307,12 @@
         <task xsi:type="configure" id="hdp_2_6_maint_ranger_hive_plugin_urlauth_filesystem_schemes"/>
       </execute-stage>
 
+      <execute-stage service="HIVE" component="HIVE_SERVER" title="Updating hive atlas application properties">
+        <task xsi:type="configure" id="hdp_2_6_maint_jaas_config_for_hive_hook">
+          <summary>Updating hive atlas application properties</summary>
+        </task>
+      </execute-stage>
+
       <!-- HBASE -->
       <execute-stage service="HBASE" component="HBASE_MASTER" title="Apply config changes for Ranger Hbase plugin">
         <task xsi:type="configure" id="hdp_2_6_maint_ranger_hbase_plugin_cluster_name"/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/9aa786f7/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
index b70943b..6b01ce9 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
@@ -792,6 +792,7 @@
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_6_maint_ranger_hive_plugin_cluster_name"/>
           <task xsi:type="configure" id="hdp_2_6_maint_ranger_hive_plugin_urlauth_filesystem_schemes"/>
+          <task xsi:type="configure" id="hdp_2_6_maint_jaas_config_for_hive_hook"/>
         </pre-upgrade>
         <pre-downgrade/> <!--  no-op to prevent config changes on downgrade -->
         <upgrade>

http://git-wip-us.apache.org/repos/asf/ambari/blob/9aa786f7/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
index d4d28c9..3ba18d8 100644
--- a/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
@@ -966,19 +966,33 @@ class TestHDP26StackAdvisor(TestCase):
 
   def test_recommendHiveConfigurations(self):
     configurations = {
-      "ranger-hive-plugin-properties": {
-        "properties": {
-          "ranger-hive-plugin-enabled": "Yes",
-          "REPOSITORY_CONFIG_USERNAME":"hive"
+      "hive-env" : {
+        "properties" : {
+          "hive.atlas.hook" : "false",
+          "hive_user": "custom_hive",
+          "hive_security_authorization": "Ranger"
         }
       },
-      "hive-env":{
-        "properties":{
-          "hive_security_authorization":"ranger",
-          "hive_user":"custom_hive"
+      "ranger-env" : {
+        "properties" : {
+          "ranger-hive-plugin-enabled" : "Yes"
+        }
+      },
+      "cluster-env" : {
+        "properties" : {
+          "security_enabled" : "false"
+        }
+      },
+      "ranger-hive-plugin-properties" : {
+        "properties" : {
+          "REPOSITORY_CONFIG_USERNAME": "hive"
         }
+      },
+      "hive-atlas-application.properties" : {
+        "properties": {}
       }
     }
+
     clusterData = {
       "cpu": 4,
       "mapMemory": 3000,
@@ -1012,31 +1026,44 @@ class TestHDP26StackAdvisor(TestCase):
 
     services = {
       "services":
-        [{
-           "StackServices": {
-             "service_name": "YARN"
-           }, "components": []
-         },
-         {
-            "StackServices": {
+        [
+          {
+            "StackServices" : {
+             "service_name" : "YARN"
+            },
+            "components" : []
+          },
+          {
+            "StackServices" : {
               "service_name" : "HIVE",
               "service_version" : "1.2.1.2.6"
             },
-            "components": [
-            ]
+            "components": []
+          },
+          {
+            "StackServices" : {
+              "service_name" : "ATLAS",
+              "service_version": "0.8.0"
+            },
+            "components": []
+          },
+          {
+            "StackServices" : {
+              "service_name" : "RANGER",
+              "service_version": "0.7.0"
+            },
+            "components": []
           }
         ],
       "Versions": {
         "stack_name" : "HDP",
         "stack_version": "2.6"
       },
-      "changed-configurations": [
-      ],
+      "changed-configurations": [],
       "configurations": configurations,
       "ambari-server-properties": {"ambari-server.user":"ambari_user"}
     }
 
-
     expected = {
       'yarn-env': {
         'properties': {
@@ -1047,7 +1074,6 @@ class TestHDP26StackAdvisor(TestCase):
       },
       'ranger-hive-plugin-properties': {
         'properties': {
-          'ranger-hive-plugin-enabled': 'Yes',
           'REPOSITORY_CONFIG_USERNAME': 'custom_hive'
         }
       },
@@ -1068,12 +1094,11 @@ class TestHDP26StackAdvisor(TestCase):
       },
       'hive-env': {
         'properties': {
-          'hive.atlas.hook': 'false',
-          'hive_security_authorization': 'ranger',
+          'hive.atlas.hook': 'true',
+          'hive_security_authorization': 'Ranger',
           'hive_exec_orc_storage_strategy': 'SPEED',
           'hive_timeline_logging_enabled': 'true',
-          'hive_txn_acid': 'off',
-          'hive_user': 'custom_hive'
+          'hive_txn_acid': 'off'
         }
       },
       'hiveserver2-site': {
@@ -1098,7 +1123,7 @@ class TestHDP26StackAdvisor(TestCase):
           'hive.exec.orc.encoding.strategy': 'SPEED',
           'hive.server2.tez.initialize.default.sessions': 'false',
           'hive.security.authorization.enabled': 'true',
-          'hive.exec.post.hooks': 'org.apache.hadoop.hive.ql.hooks.ATSHook',
+          'hive.exec.post.hooks': 'org.apache.hadoop.hive.ql.hooks.ATSHook,org.apache.atlas.hive.hook.HiveHook',
           'hive.server2.tez.default.queues': 'default',
           'hive.prewarm.enabled': 'false',
           'hive.exec.orc.compression.strategy': 'SPEED',
@@ -1144,6 +1169,21 @@ class TestHDP26StackAdvisor(TestCase):
           },
           'atlas.rest.address': {
             'delete': 'true'
+          },
+          'hive.server2.authentication.pam.services': {
+            'delete': 'true'
+          },
+          'hive.server2.custom.authentication.class': {
+            'delete': 'true'
+          },
+          'hive.server2.authentication.kerberos.principal': {
+            'delete': 'true'
+          },
+          'hive.server2.authentication.kerberos.keytab': {
+            'delete': 'true'
+          },
+          'hive.server2.authentication.ldap.url': {
+            'delete': 'true'
           }
         }
       },
@@ -1174,16 +1214,35 @@ class TestHDP26StackAdvisor(TestCase):
             'delete': 'true'
           }
         }
+      },
+      'hive-atlas-application.properties' : {
+        'properties' : {},
+        'property_attributes' : {
+            'atlas.jaas.ticketBased-KafkaClient.loginModuleControlFlag': {'delete': 'true'},
+            'atlas.jaas.ticketBased-KafkaClient.loginModuleName': {'delete': 'true'},
+            'atlas.jaas.ticketBased-KafkaClient.option.useTicketCache': {'delete': 'true'}
+        }
       }
     }
 
-    self.stackAdvisor.recommendHIVEConfigurations(configurations, clusterData, services, hosts)
-    self.assertEquals(configurations,expected)
-    configurations['hive-env']['properties']['hive_user'] = 'hive'
-    expected['hive-env']['properties']['hive_user'] = 'hive'
+    recommendedConfigurations = {}
+    self.stackAdvisor.recommendHIVEConfigurations(recommendedConfigurations, clusterData, services, hosts)
+    self.assertEquals(recommendedConfigurations, expected)
+
+    services['configurations']['hive-env']['properties']['hive_user'] = 'hive'
     expected['ranger-hive-plugin-properties']['properties']['REPOSITORY_CONFIG_USERNAME'] = 'hive'
-    self.stackAdvisor.recommendHIVEConfigurations(configurations, clusterData, services, hosts)
-    self.assertEquals(configurations,expected)
+    services['configurations']['cluster-env']['properties']['security_enabled'] = 'true'
+    expected['hive-atlas-application.properties']['properties']['atlas.jaas.ticketBased-KafkaClient.loginModuleControlFlag'] = 'required'
+    expected['hive-atlas-application.properties']['properties']['atlas.jaas.ticketBased-KafkaClient.loginModuleName'] = 'com.sun.security.auth.module.Krb5LoginModule'
+    expected['hive-atlas-application.properties']['properties']['atlas.jaas.ticketBased-KafkaClient.option.useTicketCache'] = 'true'
+    del expected['hive-atlas-application.properties']['property_attributes']
+    expected['core-site'] = {
+      'properties': {}
+    }
+
+    recommendedConfigurations = {}
+    self.stackAdvisor.recommendHIVEConfigurations(recommendedConfigurations, clusterData, services, hosts)
+    self.assertEquals(recommendedConfigurations, expected)
 
 
   def test_recommendHBASEConfigurations(self):


[25/33] ambari git commit: AMBARI-21371 - Adding ranger failed when user has custom properties (rzang)

Posted by rl...@apache.org.
AMBARI-21371 - Adding ranger failed when user has custom properties (rzang)

Change-Id: I3b73ade85c7818939e6c384f8b6bcc9966b448cb


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9d224f73
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9d224f73
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9d224f73

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 9d224f73b68279bcec834a28c35dd76122d9e73d
Parents: 2f0de69
Author: Richard Zang <rz...@apache.org>
Authored: Wed Jun 28 13:50:38 2017 -0700
Committer: Richard Zang <rz...@apache.org>
Committed: Wed Jun 28 13:50:38 2017 -0700

----------------------------------------------------------------------
 ambari-web/app/mixins/common/configs/enhanced_configs.js | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9d224f73/ambari-web/app/mixins/common/configs/enhanced_configs.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins/common/configs/enhanced_configs.js b/ambari-web/app/mixins/common/configs/enhanced_configs.js
index 4561f77..8fc7a4c 100644
--- a/ambari-web/app/mixins/common/configs/enhanced_configs.js
+++ b/ambari-web/app/mixins/common/configs/enhanced_configs.js
@@ -553,12 +553,13 @@ App.EnhancedConfigsMixin = Em.Mixin.create(App.ConfigWithOverrideRecommendationP
     if (Em.isNone(recommended)) {
       stepConfig.get('configs').removeObject(config);
     } else if (Em.isNone(initial)) {
+      var stackConfigProperty = App.configsCollection.getConfigByName(name, filename);
       stepConfig.get('configs').pushObject(this._createNewProperty(
         name,
         filename,
         Em.get(prop, 'serviceName'),
         recommended,
-        App.configsCollection.getConfigByName(name, filename).propertyDependsOn));
+        stackConfigProperty? stackConfigProperty.propertyDependsOn : []));
     } else {
       Em.set(config, 'value', recommended);
     }


[03/33] ambari git commit: AMBARI-21268. Remove Upgrade Catalogs For Every Version Before 2.5 - fix build error: remove leftover Python test

Posted by rl...@apache.org.
AMBARI-21268. Remove Upgrade Catalogs For Every Version Before 2.5 - fix build error: remove leftover Python test


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4522cf5a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4522cf5a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4522cf5a

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 4522cf5a6a9389dbb7867938ce72cd2d734dc20d
Parents: 6eaabc1
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Tue Jun 27 09:03:02 2017 +0200
Committer: Attila Doroszlai <ad...@hortonworks.com>
Committed: Tue Jun 27 09:03:02 2017 +0200

----------------------------------------------------------------------
 .../src/test/python/TestUpgradeHelper.py        | 1028 ------------------
 1 file changed, 1028 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/4522cf5a/ambari-server/src/test/python/TestUpgradeHelper.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestUpgradeHelper.py b/ambari-server/src/test/python/TestUpgradeHelper.py
deleted file mode 100644
index 6da4507..0000000
--- a/ambari-server/src/test/python/TestUpgradeHelper.py
+++ /dev/null
@@ -1,1028 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-
-from mock.mock import MagicMock, call
-from mock.mock import patch
-
-from unittest import TestCase
-import sys
-import os
-import unittest
-import upgradeHelper
-import json
-import copy
-from StringIO import StringIO
-
-
-class UpgradeCatalogFactoryMock(upgradeHelper.UpgradeCatalogFactory):
-  def __init__(self, data):
-    self._load(data)
-
-  def _load(self, data):
-    fn = StringIO(data)
-    with patch("__builtin__.open") as open_mock:
-      open_mock.return_value = fn
-      super(UpgradeCatalogFactoryMock, self)._load("")
-
-
-class TestUpgradeHelper(TestCase):
-  original_curl = None
-  out = None
-  catalog_from = "1.3"
-  catalog_to = "2.2"
-  catalog_cfg_type = "my type"
-  required_service = "TEST"
-  curl_response = "{}"
-  test_catalog = """{
-   "version": "1.0",
-   "stacks": [
-     {
-       "name": "HDP",
-       "old-version": "%s",
-       "target-version": "%s",
-       "options": {
-         "config-types": {
-           "%s": {
-             "merged-copy": "yes"
-           }
-          }
-       },
-       "properties": {
-         "%s": {
-           "my property": {
-             "value": "my value",
-             "required-services": [\"%s\"]
-           }
-         }
-       },
-       "property-mapping": {
-         "my replace property": "my property 2"
-       }
-     }
-   ]
-  }
-  """
-
-  def setUp(self):
-    # replace original curl call to mock
-    self.test_catalog = self.test_catalog % (self.catalog_from, self.catalog_to,
-                                             self.catalog_cfg_type, self.catalog_cfg_type,
-                                             self.required_service)
-
-    self.original_curl = upgradeHelper.curl
-    upgradeHelper.curl = self.magic_curl
-
-    # mock logging methods
-    upgradeHelper.logging.getLogger = MagicMock()
-    upgradeHelper.logging.FileHandler = MagicMock()
-
-    self.out = StringIO()
-    sys.stdout = self.out
-
-  def magic_curl(self, *args, **kwargs):
-    resp = self.curl_response
-    self.curl_response = "{}"
-    if "parse" in kwargs and isinstance(resp, str) and kwargs["parse"] == True:
-      resp = json.loads(resp)
-    return resp
-
-  def tearDown(self):
-    sys.stdout = sys.__stdout__
-
-  @patch("optparse.OptionParser")
-  @patch("upgradeHelper.modify_configs")
-  @patch("__builtin__.open")
-  def test_ParseOptions(self, open_mock, modify_action_mock, option_parser_mock):
-    class options(object):
-      user = "test_user"
-      hostname = "127.0.0.1"
-      clustername = "test1"
-      password = "test_password"
-      upgrade_json = "catalog_file"
-      from_stack = "0.0"
-      to_stack = "1.3"
-      logfile = "test.log"
-      report = "report.txt"
-      https = False
-      port = "8080"
-      warnings = []
-      printonly = False
-
-    args = ["update-configs"]
-    modify_action_mock.return_value = MagicMock()
-    test_mock = MagicMock()
-    test_mock.parse_args = lambda: (options, args)
-    option_parser_mock.return_value = test_mock
-
-    upgradeHelper.main()
-
-    self.assertEqual("8080", upgradeHelper.Options.API_PORT)
-    self.assertEqual("http", upgradeHelper.Options.API_PROTOCOL)
-    self.assertEqual(1, modify_action_mock.call_count)
-    self.assertEqual({"user": options.user, "pass": options.password}, upgradeHelper.Options.API_TOKENS)
-    self.assertEqual(options.clustername, upgradeHelper.Options.CLUSTER_NAME)
-
-  def test_is_services_exists(self):
-    old_services = upgradeHelper.Options.SERVICES
-
-    upgradeHelper.Options.SERVICES = set(['TEST1', 'TEST2'])
-    actual_result = upgradeHelper.is_services_exists(['TEST1'])
-
-    # check for situation with two empty sets
-    upgradeHelper.Options.SERVICES = set()
-    actual_result_1 = upgradeHelper.is_services_exists([])
-
-    upgradeHelper.Options.SERVICES = old_services
-
-    self.assertEqual(True, actual_result)
-    self.assertEqual(True, actual_result_1)
-
-
-  @patch("__builtin__.open")
-  @patch.object(os.path, "isfile")
-  @patch("os.remove")
-  def test_write_mapping(self, remove_mock, isfile_mock, open_mock):
-    test_data = {
-      "test_field": "test_value"
-    }
-    test_result = json.dumps(test_data)
-    output = StringIO()
-    isfile_mock.return_value = True
-    open_mock.return_value = output
-
-    # execute testing function
-    upgradeHelper.write_mapping(test_data)
-
-    self.assertEquals(1, isfile_mock.call_count)
-    self.assertEquals(1, remove_mock.call_count)
-    self.assertEquals(1, open_mock.call_count)
-
-    # check for content
-    self.assertEquals(test_result, output.getvalue())
-
-  @patch("__builtin__.open")
-  @patch.object(os.path, "isfile")
-  def test_read_mapping(self, isfile_mock, open_mock):
-    test_data = {
-      "test_field": "test_value"
-    }
-    test_result = json.dumps(test_data)
-    isfile_mock.return_value = True
-    output = StringIO(test_result)
-    open_mock.return_value = output
-
-    # execute testing function
-    actual_mapping = upgradeHelper.read_mapping()
-
-    self.assertEquals(1, isfile_mock.call_count)
-    self.assertEquals(1, open_mock.call_count)
-
-    self.assertEquals(test_data, actual_mapping)
-
-  @patch.object(upgradeHelper, "curl")
-  @patch.object(upgradeHelper, "write_mapping")
-  def test_get_mr1_mapping(self, write_mapping_mock, curl_mock):
-    return_data = [
-     {
-      "host_components": [   # MAPREDUCE_CLIENT
-        {
-          "HostRoles": {
-            "host_name": "test.host.vm"
-           }
-        }
-      ]
-     },
-     {
-      "host_components": [  # JOBTRACKER
-        {
-          "HostRoles": {
-            "host_name": "test1.host.vm"
-           }
-        }
-      ]
-     },
-     {
-      "host_components": [  # TASKTRACKER
-        {
-          "HostRoles": {
-            "host_name": "test2.host.vm"
-           }
-        }
-      ]
-     },
-     {
-      "host_components": [  # HISTORYSERVER
-        {
-          "HostRoles": {
-            "host_name": "test3.host.vm"
-           }
-        }
-      ]
-     }
-    ]
-    expect_data = {
-      "MAPREDUCE_CLIENT": ["test.host.vm"],
-      "JOBTRACKER": ["test1.host.vm"],
-      "TASKTRACKER": ["test2.host.vm"],
-      "HISTORYSERVER": ["test3.host.vm"]
-    }
-
-    tricky_mock = MagicMock(side_effect=return_data)
-    curl_mock.side_effect = tricky_mock
-
-    # execute testing function
-    upgradeHelper.get_mr1_mapping()
-
-    self.assertEquals(write_mapping_mock.call_count, 1)
-    self.assertEquals(expect_data, write_mapping_mock.call_args[0][0])
-
-  @patch.object(upgradeHelper, "get_choice_string_input")
-  def test_get_YN_input(self, get_choice_string_input_mock):
-    yes = set(['yes', 'ye', 'y'])
-    no = set(['no', 'n'])
-
-    prompt = "test prompt"
-    default = "default value"
-
-    # execute testing function
-    upgradeHelper.get_YN_input(prompt, default)
-
-    expect_args = (prompt, default, yes, no)
-    self.assertEquals(expect_args, get_choice_string_input_mock.call_args[0])
-
-  @patch("__builtin__.raw_input")
-  def test_get_choice_string_input(self, raw_input_mock):
-    yes = set(['yes', 'ye', 'y'])
-    no = set(['no', 'n'])
-    input_answers = ["yes", "no", ""]
-    tricky_mock = MagicMock(side_effect=input_answers)
-    raw_input_mock.side_effect = tricky_mock
-    default = "default value"
-
-    expect_result = [True, False, default]
-    actual_result = []
-    for i in range(0, len(input_answers)):
-      actual_result.append(upgradeHelper.get_choice_string_input("test prompt", default, yes, no))
-
-    self.assertEquals(expect_result, actual_result)
-
-  @patch.object(upgradeHelper, "get_YN_input")
-  @patch.object(upgradeHelper, "read_mapping")
-  @patch.object(upgradeHelper, "curl")
-  def test_delete_mr(self, curl_mock, read_mapping_mock, get_YN_mock):
-    COMPONENT_URL_FORMAT = upgradeHelper.Options.CLUSTER_URL + '/hosts/%s/host_components/%s'
-    SERVICE_URL_FORMAT = upgradeHelper.Options.CLUSTER_URL + '/services/MAPREDUCE'
-    NON_CLIENTS = ["JOBTRACKER", "TASKTRACKER", "HISTORYSERVER"]
-    PUT_IN_DISABLED = {
-      "HostRoles": {
-        "state": "DISABLED"
-      }
-    }
-    mr_mapping = {
-      "MAPREDUCE_CLIENT": ["test.host.vm"],
-      "JOBTRACKER": ["test1.host.vm"],
-      "TASKTRACKER": ["test2.host.vm"],
-      "HISTORYSERVER": ["test3.host.vm"]
-    }
-    expected_curl_exec_args = []
-    for key, hosts in mr_mapping.items():
-      if key in NON_CLIENTS:
-        for host in hosts:
-          expected_curl_exec_args.append(
-            [
-              (COMPONENT_URL_FORMAT % (host, key),),
-              {
-                "request_type": "PUT",
-                "data": PUT_IN_DISABLED,
-                "validate": True
-              }
-            ]
-          )
-
-    expected_curl_exec_args.append(
-      [
-        (SERVICE_URL_FORMAT,),
-        {
-          "request_type": "DELETE",
-          "validate": True
-        }
-      ]
-    )
-
-    get_YN_mock.return_value = True
-    read_mapping_mock.return_value = mr_mapping
-
-    # execute testing function
-    upgradeHelper.delete_mr()
-
-    self.assertEqual(expected_curl_exec_args, curl_mock.call_args_list)
-
-    pass
-
-  @patch.object(upgradeHelper, "curl")
-  def test_get_cluster_stackname(self, curl_mock):
-    expected_result = "test version"
-    actual_result = ""
-    curl_mock.return_value = {
-      "Clusters": {
-        "version": expected_result
-      }
-    }
-
-    # execute testing function
-    actual_result = upgradeHelper.get_cluster_stackname()
-
-    self.assertEqual(expected_result, actual_result)
-
-  @patch.object(upgradeHelper, "curl")
-  def test_has_component_in_stack_def(self, curl_mock):
-    curl_mock.side_effect = MagicMock(side_effect=["", upgradeHelper.FatalException(1, "some reason")])
-
-    # execute testing function
-    result_ok = upgradeHelper.has_component_in_stack_def("-", "", "")
-    result_fail = upgradeHelper.has_component_in_stack_def("-", "", "")
-
-    self.assertEqual(True, result_ok)
-    self.assertEqual(False, result_fail)
-
-  @patch.object(upgradeHelper, "get_cluster_stackname")
-  @patch.object(upgradeHelper, "has_component_in_stack_def")
-  @patch.object(upgradeHelper, "read_mapping")
-  @patch.object(upgradeHelper, "curl")
-  def test_add_services(self, curl_mock, read_mapping_mock, has_component_mock, get_stack_name_mock):
-    host_mapping = {
-      "MAPREDUCE_CLIENT": ["test.host.vm"],
-      "JOBTRACKER": ["test1.host.vm"],
-      "TASKTRACKER": ["test2.host.vm"],
-      "HISTORYSERVER": ["test3.host.vm"]
-    }
-    SERVICE_URL_FORMAT = upgradeHelper.Options.CLUSTER_URL + '/services/{0}'
-    COMPONENT_URL_FORMAT = SERVICE_URL_FORMAT + '/components/{1}'
-    HOST_COMPONENT_URL_FORMAT = upgradeHelper.Options.CLUSTER_URL + '/hosts/{0}/host_components/{1}'
-    service_comp = {
-      "YARN": ["NODEMANAGER", "RESOURCEMANAGER", "YARN_CLIENT"],
-      "MAPREDUCE2": ["HISTORYSERVER", "MAPREDUCE2_CLIENT"]}
-    new_old_host_map = {
-      "NODEMANAGER": "TASKTRACKER",
-      "HISTORYSERVER": "HISTORYSERVER",
-      "RESOURCEMANAGER": "JOBTRACKER",
-      "YARN_CLIENT": "MAPREDUCE_CLIENT",
-      "MAPREDUCE2_CLIENT": "MAPREDUCE_CLIENT"}
-    get_stack_name_mock.return_value = ""
-    has_component_mock.return_value = False
-    read_mapping_mock.return_value = host_mapping
-    expected_curl_args = []
-
-    for service in service_comp.keys():
-      expected_curl_args.append([
-        (SERVICE_URL_FORMAT.format(service),),
-        {
-          "validate": True,
-          "request_type": "POST"
-        }
-      ])
-      for component in service_comp[service]:
-        expected_curl_args.append([
-          (COMPONENT_URL_FORMAT.format(service, component),),
-          {
-            "validate": True,
-            "request_type": "POST"
-          }
-        ])
-        for host in host_mapping[new_old_host_map[component]]:
-          expected_curl_args.append([
-            (HOST_COMPONENT_URL_FORMAT.format(host, component),),
-            {
-              "validate": True,
-              "request_type": "POST"
-            }
-          ])
-
-    # execute testing function
-    upgradeHelper.add_services()
-
-    self.assertEqual(expected_curl_args, curl_mock.call_args_list)
-
-  @patch.object(upgradeHelper, "get_config_resp_all")
-  def test_coerce_tag(self, get_config_resp_all_mock):
-    test_catalog = """
-        {
-      "version": "1.0",
-      "stacks": [
-        {
-          "name": "HDP",
-          "old-version": "1.0",
-          "target-version": "1.1",
-          "options": {
-            "config-types":{
-              "test": {
-                "merged-copy": "yes"
-              }
-            }
-          },
-          "properties": {
-             "test": {
-               "test": "host1.com"
-            }
-          },
-          "property-mapping": {
-            "test":{
-                "map-to": "test-arr",
-                "coerce-to": "yaml-array"
-           }
-          }
-        }
-      ]
-    }
-    """
-    old_opt = upgradeHelper.Options.OPTIONS
-    options = lambda: ""
-    options.from_stack = "1.0"
-    options.to_stack = "1.1"
-    options.upgrade_json = ""
-
-    upgradeHelper.Options.OPTIONS = options
-    upgradeHelper.Options.SERVICES = [self.required_service]
-    get_config_resp_all_mock.return_value = {
-      "test": {
-        "properties": {}
-      }
-    }
-
-    ucf = UpgradeCatalogFactoryMock(test_catalog)
-    scf = upgradeHelper.ServerConfigFactory()
-
-    cfg = scf.get_config("test")
-    ucfg = ucf.get_catalog("1.0", "1.1")
-
-    cfg.merge(ucfg)
-    scf.process_mapping_transformations(ucfg)
-
-    upgradeHelper.Options.OPTIONS = old_opt
-
-    self.assertEqual(True, "test-arr" in cfg.properties)
-    self.assertEqual("['host1.com']", cfg.properties["test-arr"])
-
-  @patch.object(upgradeHelper, "get_config_resp_all")
-  def test_override_tag(self, get_config_resp_all_mock):
-    test_catalog = """
-        {
-      "version": "1.0",
-      "stacks": [
-        {
-          "name": "HDP",
-          "old-version": "1.0",
-          "target-version": "1.1",
-          "options": {
-            "config-types":{
-              "test": {
-                "merged-copy": "yes"
-              }
-            }
-          },
-          "properties": {
-             "test": {
-               "test_property": {
-                  "value": "host1.com",
-                  "override": "no"
-                }
-
-            }
-          },
-          "property-mapping": {}
-        }
-      ]
-    }
-    """
-    old_opt = upgradeHelper.Options.OPTIONS
-    options = lambda: ""
-    options.from_stack = "1.0"
-    options.to_stack = "1.1"
-    options.upgrade_json = ""
-
-    upgradeHelper.Options.OPTIONS = options
-    upgradeHelper.Options.SERVICES = [self.required_service]
-    get_config_resp_all_mock.return_value = {
-      "test": {
-        "properties": {
-          "test_property": "test host"
-        }
-      }
-    }
-
-    ucf = UpgradeCatalogFactoryMock(test_catalog)
-    scf = upgradeHelper.ServerConfigFactory()
-
-    cfg = scf.get_config("test")
-    ucfg = ucf.get_catalog("1.0", "1.1")
-
-    cfg.merge(ucfg)
-    scf.process_mapping_transformations(ucfg)
-
-    upgradeHelper.Options.OPTIONS = old_opt
-
-    self.assertEqual(True, "test_property" in cfg.properties)
-    self.assertEqual("test host", cfg.properties["test_property"])
-
-  @patch.object(upgradeHelper, "get_config_resp_all")
-  def test_replace_tag(self, get_config_resp_all_mock):
-    test_catalog = """
-        {
-      "version": "1.0",
-      "stacks": [
-        {
-          "name": "HDP",
-          "old-version": "1.0",
-          "target-version": "1.1",
-          "options": {
-            "config-types":{
-              "test": {
-                "merged-copy": "yes"
-              }
-            }
-          },
-          "properties": {
-             "test": {
-               "test": "host1.com"
-            }
-          },
-          "property-mapping": {
-            "test":{
-                "map-to": "test-arr",
-                "replace-from": "com",
-                "replace-to": "org"
-           }
-          }
-        }
-      ]
-    }
-    """
-    old_opt = upgradeHelper.Options.OPTIONS
-    options = lambda: ""
-    options.from_stack = "1.0"
-    options.to_stack = "1.1"
-    options.upgrade_json = ""
-
-    upgradeHelper.Options.OPTIONS = options
-    upgradeHelper.Options.SERVICES = [self.required_service]
-    get_config_resp_all_mock.return_value = {
-      "test": {
-        "properties": {}
-      }
-    }
-
-    ucf = UpgradeCatalogFactoryMock(test_catalog)
-    scf = upgradeHelper.ServerConfigFactory()
-
-    cfg = scf.get_config("test")
-    ucfg = ucf.get_catalog("1.0", "1.1")
-
-    cfg.merge(ucfg)
-    scf.process_mapping_transformations(ucfg)
-
-    upgradeHelper.Options.OPTIONS = old_opt
-
-    self.assertEqual(True, "test-arr" in cfg.properties)
-    self.assertEqual("host1.org", cfg.properties["test-arr"])
-
-  @patch.object(upgradeHelper, "curl")
-  @patch("time.time")
-  def test_update_config(self, time_mock, curl_mock):
-    time_pass = 2
-    config_type = "test config"
-    properties = {
-      "test property": "test value"
-    }
-    attributes = {
-      "test attribute": "attribute value"
-    }
-    expected_tag = "version" + str(int(time_pass * 1000))
-    properties_payload = {"Clusters": {"desired_config": {"type": config_type, "tag": expected_tag, "properties": properties}}}
-    time_mock.return_value = time_pass
-
-    expected_simple_result = (
-      (upgradeHelper.Options.CLUSTER_URL,),
-      {
-        "request_type": "PUT",
-        "data": copy.deepcopy(properties_payload),
-        "validate": True,
-        "soft_validation": True
-      }
-    )
-
-    properties_payload["Clusters"]["desired_config"]["properties_attributes"] = attributes
-    expected_complex_result = (
-      (upgradeHelper.Options.CLUSTER_URL,),
-      {
-        "request_type": "PUT",
-        "data": copy.deepcopy(properties_payload),
-        "validate": True,
-        "soft_validation": True
-      }
-    )
-
-    # execute testing function
-    upgradeHelper.update_config(properties, config_type)
-    simple_result = tuple(curl_mock.call_args)
-
-    upgradeHelper.update_config(properties, config_type, attributes)
-    complex_result = tuple(curl_mock.call_args)
-
-    self.assertEqual(expected_simple_result, simple_result)
-    self.assertEqual(expected_complex_result, complex_result)
-
-  @patch.object(upgradeHelper, "curl")
-  def test_get_zookeeper_quorum(self, curl_mock):
-    zoo_def_port = "2181"
-    return_curl_data = {
-      "host_components": [
-                           {
-                             "HostRoles": {
-                               "host_name": "test.host.vm"
-                             }
-                           },
-                           {
-                             "HostRoles": {
-                               "host_name": "test.host.vm"
-                             }
-                           }
-      ]
-    }
-
-    curl_mock.return_value = copy.deepcopy(return_curl_data)
-
-    # build zookeeper quorum string from return_curl_data and remove trailing commas
-    expected_result = reduce(
-      lambda x, y: x + "%s:%s," % (y["HostRoles"]["host_name"], zoo_def_port),
-      return_curl_data["host_components"],
-      ''  # initializer
-    ).rstrip(',')
-
-    # execute testing function
-    actual_result = upgradeHelper.get_zookeeper_quorum()
-
-    self.assertEqual(expected_result, actual_result)
-
-  @patch.object(upgradeHelper, "curl")
-  def test_get_tez_history_url_base(self, curl_mock):
-    return_curl_data = {
-      'href': 'http://127.0.0.1:8080/api/v1/views/TEZ',
-      'ViewInfo': {'view_name': 'TEZ'},
-      'versions': [
-        {
-          'ViewVersionInfo': {
-            'view_name': 'TEZ',
-            'version': '0.7.0.2.3.0.0-1319'
-          },
-          'href': 'http://127.0.0.1:8080/api/v1/views/TEZ/versions/0.7.0.2.3.0.0-1319'
-        }
-      ]
-    }
-
-    curl_mock.return_value = copy.deepcopy(return_curl_data)
-
-    # build zookeeper quorum string from return_curl_data and remove trailing commas
-    expected_result = "http://127.0.0.1:8080/#/main/views/TEZ/0.7.0.2.3.0.0-1319/TEZ_CLUSTER_INSTANCE"
-
-    # execute testing function
-    actual_result = upgradeHelper.get_tez_history_url_base()
-
-    self.assertEqual(expected_result, actual_result)
-
-  @patch.object(upgradeHelper, "curl")
-  def test_get_ranger_xaaudit_hdfs_destination_directory(self, curl_mock):
-    return_curl_data = {
-      "host_components": [
-        {
-          "HostRoles": {
-            "host_name": "test.host.vm"
-          }
-        }
-      ]
-    }
-
-    curl_mock.return_value = copy.deepcopy(return_curl_data)
-
-    # build zookeeper quorum string from return_curl_data and remove trailing commas
-    expected_result = "hdfs://test.host.vm:8020/ranger/audit"
-
-    # execute testing function
-    actual_result = upgradeHelper.get_ranger_xaaudit_hdfs_destination_directory()
-
-    self.assertEqual(expected_result, actual_result)
-
-
-  @patch.object(upgradeHelper, "curl")
-  def test_get_config_resp_all(self, curl_mock):
-    cfg_type = "my type"
-    cfg_tag = "my tag"
-    cfg_properties = {
-      "my property": "property value"
-    }
-    curl_resp = [
-      {
-        'Clusters': {
-          'desired_configs': {
-            cfg_type: {
-              "tag": cfg_tag
-            }
-          }
-        }
-      },
-      {
-        "items": [
-          {
-            "type": cfg_type,
-            "tag": cfg_tag,
-            "properties": cfg_properties
-          }
-        ]
-      }
-    ]
-
-    expected_result = {
-        cfg_type: {
-          "properties": cfg_properties,
-          "tag": cfg_tag
-        }
-      }
-    curl_mock.side_effect = MagicMock(side_effect=curl_resp)
-
-    # execute testing function
-    actual_result = upgradeHelper.get_config_resp_all()
-
-    self.assertEquals(expected_result, actual_result)
-    pass
-
-  @patch.object(upgradeHelper, "get_config_resp_all")
-  @patch("os.mkdir")
-  @patch("os.path.exists")
-  @patch("__builtin__.open")
-  def test_backup_configs(self, open_mock, os_path_exists_mock, mkdir_mock, get_config_resp_all_mock):
-    data = {
-      self.catalog_cfg_type: {
-        "properties": {
-          "test-property": "value"
-        },
-        "tag": "version1"
-      }
-    }
-    os_path_exists_mock.return_value = False
-    get_config_resp_all_mock.return_value = data
-    expected = json.dumps(data[self.catalog_cfg_type]["properties"], indent=4)
-    stream = StringIO()
-    m = MagicMock()
-    m.__enter__.return_value = stream
-    open_mock.return_value = m
-
-    # execute testing function
-    upgradeHelper.backup_configs(self.catalog_cfg_type)
-
-    self.assertEqual(expected, stream.getvalue())
-
-  @patch.object(upgradeHelper, "curl")
-  def test_install_services(self, curl_mock):
-    expected_args = (
-      (
-        ('http://127.0.0.1:8080/api/v1/clusters/test1/services/MAPREDUCE2',),
-        {
-          'request_type': 'PUT',
-          'data': {
-            'RequestInfo': {
-              'context': 'Install MapReduce2'
-            },
-            'Body': {
-              'ServiceInfo': {
-                'state': 'INSTALLED'
-              }
-            }
-          },
-          'validate': True
-        }
-      ),
-      (
-        ('http://127.0.0.1:8080/api/v1/clusters/test1/services/YARN',),
-        {
-          'request_type': 'PUT',
-          'data': {
-            'RequestInfo': {
-              'context': 'Install YARN'
-            },
-            'Body': {
-              'ServiceInfo': {
-                'state': 'INSTALLED'
-              }
-            }
-          },
-          'validate': True
-        }
-      )
-    )
-
-    # execute testing function
-    upgradeHelper.install_services()
-
-    self.assertEqual(2, curl_mock.call_count)
-    for i in range(0, 1):
-      self.assertEqual(expected_args[i], tuple(curl_mock.call_args_list[i]))
-
-  def test_configuration_diff_analyze(self):
-    in_data = {
-        self.catalog_cfg_type: [
-          {
-            'catalog_item': {
-              'value': 'my value'
-            },
-            'property': 'my property',
-            'actual_value': 'my value',
-            'catalog_value': 'my value'
-          }
-        ]
-    }
-
-    expected_result = {
-      'my type': {
-        'fail': {
-          'count': 0,
-          'items': []
-        },
-        'total': {
-          'count': 1,
-          'items': []
-        },
-      'skipped': {
-        'count': 0,
-        'items': []
-      },
-        'ok': {
-          'count': 1,
-          'items': [
-                    {
-                      'catalog_item': {
-                        'value': 'my value'
-                      },
-                      'property': 'my property',
-                      'actual_value': 'my value',
-                      'catalog_value': 'my value'
-                    }
-          ]
-        }
-      }
-    }
-
-    # execute testing function
-    actual_result = upgradeHelper.configuration_diff_analyze(in_data)
-
-    self.assertEqual(expected_result, actual_result)
-
-  @patch.object(upgradeHelper, "UpgradeCatalogFactory", autospec=True)
-  @patch.object(upgradeHelper, "get_config_resp_all")
-  @patch.object(upgradeHelper, "configuration_item_diff")
-  @patch.object(upgradeHelper, "configuration_diff_analyze")
-  @patch("__builtin__.open")
-  def test_verify_configuration(self, open_mock, configuration_diff_analyze_mock, configuration_item_diff_mock,
-                                get_config_resp_all_mock, upgradecatalogfactory_mock):
-    old_opt = upgradeHelper.Options.OPTIONS
-    options = lambda: ""
-    options.from_stack = self.catalog_from
-    options.to_stack = self.catalog_to
-    options.upgrade_json = ""
-
-    upgradeHelper.Options.OPTIONS = options
-    upgradeHelper.Options.SERVICES = [self.required_service]
-    upgradecatalogfactory_mock.return_value = UpgradeCatalogFactoryMock(self.test_catalog)
-    get_config_resp_all_mock.return_value = {
-      self.catalog_cfg_type: {
-        "properties": {}
-      }
-    }
-
-    # execute testing function
-    upgradeHelper.verify_configuration()
-
-    upgradeHelper.Options.OPTIONS = old_opt
-
-    self.assertEqual(1, get_config_resp_all_mock.call_count)
-    self.assertEqual(1, configuration_item_diff_mock.call_count)
-    self.assertEqual(1, configuration_diff_analyze_mock.call_count)
-    self.assertEqual(1, open_mock.call_count)
-
-  def test_report_formatter(self):
-    file = StringIO()
-    cfg_item = self.catalog_cfg_type
-    analyzed_list = {
-        'fail': {
-          'count': 1,
-          'items': [
-            {
-              'catalog_item': {
-                'value': 'my value'
-              },
-              'property': 'my property',
-              'actual_value': 'my value 1',
-              'catalog_value': 'my value'
-            }
-          ]
-        },
-        'total': {
-          'count': 1,
-          'items': []
-        },
-        'skipped': {
-          'count': 0,
-          'items': []
-        },
-        'ok': {
-          'count': 0,
-          'items': []
-        }
-    }
-
-    expected_output = "Configuration item my type: property \"my property\" is set to \"my value 1\", but should be set to \"my value\"\n"
-
-    # execute testing function
-    upgradeHelper.report_formatter(file, cfg_item, analyzed_list)
-
-    self.assertEqual(expected_output, file.getvalue())
-
-  @patch.object(upgradeHelper, "get_config_resp_all")
-  def test_conditional_replace(self, get_config_resp_all_mock):
-    test_catalog = """
-        {
-      "version": "1.0",
-      "stacks": [
-        {
-          "name": "HDP",
-          "old-version": "1.0",
-          "target-version": "1.1",
-          "options": {
-            "config-types":{
-              "test": {
-                "merged-copy": "yes"
-              }
-            }
-          },
-          "properties": {
-             "test": {
-               "test": {
-                 "value": "10",
-                 "value-required": "-1"
-               },
-               "test2": {
-                 "value": "10",
-                 "value-required": "-2"
-               }
-            }
-          },
-          "property-mapping": {
-          }
-        }
-      ]
-    }
-    """
-
-    expected_properties = {"test":"10", "test2":"15"}
-
-    old_opt = upgradeHelper.Options.OPTIONS
-    options = lambda: ""
-    options.from_stack = "1.0"
-    options.to_stack = "1.1"
-    options.upgrade_json = ""
-
-    upgradeHelper.Options.OPTIONS = options
-    upgradeHelper.Options.SERVICES = [self.required_service]
-    get_config_resp_all_mock.return_value = {
-      "test": {
-        "properties": {"test":"-1", "test2":"15"}
-      }
-    }
-
-    ucf = UpgradeCatalogFactoryMock(test_catalog)
-    scf = upgradeHelper.ServerConfigFactory()
-
-    cfg = scf.get_config("test")
-    ucfg = ucf.get_catalog("1.0", "1.1")
-
-    cfg.merge(ucfg)
-    upgradeHelper.Options.OPTIONS = old_opt
-
-    self.assertEqual(expected_properties, cfg.properties)
-
-if __name__ == "__main__":
-  unittest.main()


[33/33] ambari git commit: Merge branch 'trunk' into branch-feature-AMBARI-20859

Posted by rl...@apache.org.
Merge branch 'trunk' into branch-feature-AMBARI-20859


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0945f28e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0945f28e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0945f28e

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 0945f28e65e7627b9a7f85c00027795f7c772b32
Parents: 12ae259 d7c59fc
Author: Robert Levas <rl...@hortonworks.com>
Authored: Fri Jun 30 05:45:59 2017 -0400
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Fri Jun 30 05:45:59 2017 -0400

----------------------------------------------------------------------
 .../controllers/ambariViews/ViewsListCtrl.js    |   20 +
 .../controllers/groups/GroupsEditCtrl.js        |   19 +-
 .../app/views/ambariViews/listTable.html        |    3 +
 .../ui/admin-web/app/views/groups/edit.html     |    3 +
 .../ambari_agent/AlertSchedulerHandler.py       |   10 +-
 .../python/ambari_agent/alerts/base_alert.py    |    8 +-
 .../python/ambari_agent/alerts/port_alert.py    |  107 +-
 .../ambari_agent/TestAlertSchedulerHandler.py   |   17 +-
 .../libraries/functions/mounted_dirs_helper.py  |    1 +
 .../libraries/functions/packages_analyzer.py    |   15 +-
 ambari-infra/ambari-infra-manager/pom.xml       |    1 -
 ambari-infra/ambari-infra-solr-plugin/pom.xml   |    4 +-
 ambari-infra/pom.xml                            |    2 +-
 .../ambari-logsearch-logfeeder/pom.xml          |    4 +-
 .../ambari-logsearch-server/pom.xml             |   12 +-
 ambari-logsearch/pom.xml                        |    5 +-
 ambari-metrics/ambari-metrics-common/pom.xml    |    4 +
 .../timeline/HBaseTimelineMetricStore.java      |    9 +-
 .../timeline/TimelineMetricConfiguration.java   |   11 +
 .../timeline/TestTimelineMetricStore.java       |    1 +
 ambari-server/docs/configuration/index.md       |   13 +-
 .../server/agent/AlertDefinitionCommand.java    |    7 +-
 .../ambari/server/agent/ExecutionCommand.java   |    4 +
 .../ambari/server/agent/HeartBeatHandler.java   |    4 +-
 .../server/configuration/Configuration.java     |   65 +-
 .../controller/AmbariActionExecutionHelper.java |    2 +
 .../AmbariCustomCommandExecutionHelper.java     |   12 +-
 .../controller/AmbariManagementController.java  |    4 +
 .../AmbariManagementControllerImpl.java         |   43 +-
 .../ambari/server/controller/AmbariServer.java  |    4 +
 .../server/controller/ConfigGroupResponse.java  |   10 +
 .../controller/DeleteIdentityHandler.java       |  283 +++++
 .../server/controller/KerberosHelper.java       |    3 +
 .../server/controller/KerberosHelperImpl.java   |   31 +-
 .../OrderedRequestStageContainer.java           |   62 ++
 .../internal/AbstractProviderModule.java        |   47 +-
 .../internal/ClientConfigResourceProvider.java  |    9 +-
 .../internal/ConfigGroupResourceProvider.java   |   31 +-
 .../server/controller/jmx/JMXHostProvider.java  |   11 +
 .../controller/jmx/JMXPropertyProvider.java     |   24 +
 .../utilities/KerberosIdentityCleaner.java      |  135 +++
 .../ambari/server/orm/DBAccessorImpl.java       |   48 +-
 .../orm/helpers/dbms/GenericDbmsHelper.java     |    2 +-
 .../AbstractPrepareKerberosServerAction.java    |   19 +-
 .../server/serveraction/kerberos/Component.java |   74 ++
 .../kerberos/FinalizeKerberosServerAction.java  |   27 +-
 .../kerberos/KerberosServerAction.java          |   27 +
 .../ambari/server/stack/MasterHostResolver.java |   11 +-
 .../org/apache/ambari/server/state/Cluster.java |    8 +
 .../apache/ambari/server/state/ConfigImpl.java  |    3 +-
 .../server/state/alert/AlertDefinitionHash.java |   14 +-
 .../server/state/cluster/ClusterImpl.java       |   18 +
 .../kerberos/AbstractKerberosDescriptor.java    |   15 +
 .../kerberos/KerberosComponentDescriptor.java   |   15 +
 .../state/kerberos/KerberosDescriptor.java      |    8 -
 .../kerberos/KerberosIdentityDescriptor.java    |   30 +
 .../kerberos/KerberosServiceDescriptor.java     |    6 +
 .../apache/ambari/server/utils/StageUtils.java  |   54 +
 ambari-server/src/main/python/ambari-server.py  |    2 +
 .../python/ambari_server/serverConfiguration.py |    6 +
 .../main/python/ambari_server/serverSetup.py    |  105 +-
 .../0.1.0/package/scripts/params.py             |    4 +-
 .../0.1.0/package/scripts/params.py             |   10 +-
 .../ATLAS/0.1.0.2.3/package/scripts/metadata.py |    8 +-
 .../ATLAS/0.1.0.2.3/package/scripts/params.py   |    1 +
 .../HBASE/0.96.0.2.0/role_command_order.json    |    3 +-
 .../HBASE/2.0.0.3.0/role_command_order.json     |    2 +-
 .../2.1.0.2.0/package/scripts/hdfs_namenode.py  |    4 +-
 .../package/scripts/namenode_upgrade.py         |    2 +-
 .../2.1.0.2.0/package/scripts/params_linux.py   |    4 +
 .../HIVE/2.1.0.3.0/service_advisor.py           |   19 +
 .../LOGSEARCH/0.5.0/package/scripts/params.py   |    4 +-
 .../RANGER/0.4.0/package/scripts/params.py      |    1 +
 .../0.4.0/package/scripts/setup_ranger_xml.py   |   10 +-
 .../2.0.6/hooks/before-ANY/scripts/params.py    |    2 +
 .../before-ANY/scripts/shared_initialization.py |   30 +-
 .../2.0.6/hooks/before-START/scripts/params.py  |    4 +
 .../scripts/shared_initialization.py            |   22 +-
 .../stacks/HDP/2.5/upgrades/config-upgrade.xml  |   10 +-
 .../HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml |    7 +-
 .../stacks/HDP/2.5/upgrades/upgrade-2.6.xml     |    5 +-
 .../stacks/HDP/2.6/services/stack_advisor.py    |   21 +
 .../stacks/HDP/2.6/upgrades/config-upgrade.xml  |    9 +
 .../HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml |    6 +
 .../stacks/HDP/2.6/upgrades/upgrade-2.6.xml     |    1 +
 .../HDP/3.0/hooks/before-ANY/scripts/params.py  |    3 +
 .../before-ANY/scripts/shared_initialization.py |   31 +-
 .../3.0/hooks/before-START/scripts/params.py    |    4 +
 .../scripts/shared_initialization.py            |   22 +-
 .../main/resources/stacks/HDP/3.0/metainfo.xml  |    2 +-
 .../PERF/1.0/hooks/before-ANY/scripts/params.py |    3 +
 .../before-ANY/scripts/shared_initialization.py |   23 +-
 .../src/main/resources/stacks/stack_advisor.py  |    2 +-
 .../AmbariManagementControllerImplTest.java     |   16 +-
 .../ClientConfigResourceProviderTest.java       |    8 +
 .../ConfigGroupResourceProviderTest.java        |    2 +
 .../metrics/JMXPropertyProviderTest.java        |    9 +
 .../utilities/KerberosIdentityCleanerTest.java  |  204 ++++
 .../ambari/server/orm/DBAccessorImplTest.java   |   29 +
 .../state/alerts/AlertDefinitionHashTest.java   |    4 +-
 .../ambari/server/utils/StageUtilsTest.java     |   99 ++
 .../src/test/python/TestAmbariServer.py         |   49 +-
 .../src/test/python/TestUpgradeHelper.py        | 1028 ------------------
 .../stacks/2.0.6/common/test_stack_advisor.py   |    2 +-
 .../configs/ha_bootstrap_standby_node.json      |    2 +-
 ...ha_bootstrap_standby_node_initial_start.json |    2 +-
 ...dby_node_initial_start_dfs_nameservices.json |    2 +-
 .../stacks/2.5/common/test_stack_advisor.py     |    2 +-
 .../stacks/2.6/common/test_stack_advisor.py     |  123 ++-
 ambari-web/app/controllers/main/service/item.js |    6 +-
 .../app/controllers/wizard/step8_controller.js  |   16 +-
 .../mixins/common/configs/enhanced_configs.js   |    3 +-
 ambari-web/app/models/stack.js                  |    4 +-
 .../stacks/ODPi/2.0/services/stack_advisor.py   |    2 +-
 .../src/main/resources/ui/app/styles/app.less   |    4 +-
 .../wfmanager/src/main/resources/ui/bower.json  |    4 +-
 .../hdfs-directory-viewer/addon/styles/app.css  |    1 +
 .../wfmanager/src/main/resources/ui/yarn.lock   |   68 +-
 docs/pom.xml                                    |   12 +
 119 files changed, 2159 insertions(+), 1373 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/0945f28e/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/0945f28e/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/0945f28e/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/0945f28e/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
----------------------------------------------------------------------


[09/33] ambari git commit: Revert "AMBARI-21206 - Remove Zookeeper as a required service from YARN"

Posted by rl...@apache.org.
Revert "AMBARI-21206 - Remove Zookeeper as a required service from YARN"

This reverts commit a2464b9045637c1d5014db4aff7d83a0bc573fc0.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ebd79e98
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ebd79e98
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ebd79e98

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: ebd79e989984ee1fd55ebe6cdb4e8469874bd8b7
Parents: 535660b
Author: Tim Thorpe <tt...@apache.org>
Authored: Tue Jun 27 04:53:36 2017 -0700
Committer: Tim Thorpe <tt...@apache.org>
Committed: Tue Jun 27 04:53:36 2017 -0700

----------------------------------------------------------------------
 .../YARN/3.0.0.3.0/configuration/yarn-site.xml  |  10 +-
 .../common-services/YARN/3.0.0.3.0/metainfo.xml |  46 +------
 .../YARN/3.0.0.3.0/service_advisor.py           |  53 +-------
 .../stacks/HDP/2.2/services/stack_advisor.py    |  53 +-------
 .../stacks/2.2/common/test_stack_advisor.py     | 132 +------------------
 .../stacks/2.6/common/test_stack_advisor.py     |   9 --
 6 files changed, 14 insertions(+), 289 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/ebd79e98/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
index c77aa2a..64e0bcb 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
@@ -486,10 +486,7 @@
   </property>
   <property>
     <name>hadoop.registry.zk.quorum</name>
-    <value></value>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
+    <value>localhost:2181</value>
     <description>
       List of hostname:port pairs defining the zookeeper quorum binding for the registry
     </description>
@@ -556,10 +553,7 @@
   </property>
   <property>
     <name>yarn.resourcemanager.zk-address</name>
-    <value></value>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
+    <value>localhost:2181</value>
     <description>
       List Host:Port of the ZooKeeper servers to be used by the RM. comma separated host:port pairs, each corresponding to a zk server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" If the optional chroot suffix is used the example would look like: "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002/app/a" where the client would be rooted at "/app/a" and all paths would be relative to this root - ie getting/setting/etc...  "/foo/bar" would result in operations being run on "/app/a/foo/bar" (from the server perspective).
     </description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ebd79e98/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml
index 90f4a92..061587d 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml
@@ -73,41 +73,17 @@
             <timeout>1200</timeout>
           </commandScript>
 
+          <!-- TODO HDP 3.0, add later after UI is fixed,
           <dependencies>
             <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>false</enabled>
-              </auto-deploy>
-              <conditions>
-                <condition xsi:type="propertyExists">
-                  <configType>yarn-site</configType>
-                  <property>yarn.resourcemanager.recovery.enabled</property>
-                  <value>true</value>
-                </condition>
-                <condition xsi:type="propertyExists">
-                  <configType>yarn-site</configType>
-                  <property>yarn.resourcemanager.ha.enabled</property>
-                  <value>true</value>
-                </condition>
-                <condition xsi:type="propertyExists">
-                  <configType>yarn-site</configType>
-                  <property>hadoop.registry.rm.enabled</property>
-                  <value>true</value>
-                </condition>
-              </conditions>
-            </dependency>
-            <!-- TODO HDP 3.0, add later after UI is fixed,
-            <dependency>
               <name>TEZ/TEZ_CLIENT</name>
               <scope>host</scope>
               <auto-deploy>
                 <enabled>true</enabled>
               </auto-deploy>
             </dependency>
-            -->
           </dependencies>
+          -->
 
           <logs>
             <log>
@@ -169,23 +145,6 @@
               <logId>yarn_nodemanager</logId>
             </log>
           </logs>
-
-          <dependencies>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>false</enabled>
-              </auto-deploy>
-              <conditions>
-                <condition xsi:type="propertyExists">
-                  <configType>yarn-site</configType>
-                  <property>yarn.nodemanager.recovery.enabled</property>
-                  <value>true</value>
-                </condition>
-              </conditions>
-            </dependency>
-          </dependencies>
         </component>
 
         <component>
@@ -255,6 +214,7 @@
       <requiredServices>
         <service>HDFS</service>
         <service>MAPREDUCE2</service>
+        <service>ZOOKEEPER</service>
       </requiredServices>
 
       <themes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ebd79e98/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
index 1af9821..0fb538d 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
@@ -351,21 +351,12 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
           putYarnPropertyAttribute('yarn.nodemanager.linux-container-executor.cgroups.hierarchy', 'delete', 'true')
           putYarnPropertyAttribute('yarn.nodemanager.linux-container-executor.cgroups.mount', 'delete', 'true')
           putYarnPropertyAttribute('yarn.nodemanager.linux-container-executor.cgroups.mount-path', 'delete', 'true')
-    # recommend hadoop.registry.rm.enabled based on SLIDER and ZOOKEEPER in services
+    # recommend hadoop.registry.rm.enabled based on SLIDER in services
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
-    if "SLIDER" in servicesList and "ZOOKEEPER" in servicesList:
+    if "SLIDER" in servicesList:
       putYarnProperty('hadoop.registry.rm.enabled', 'true')
     else:
       putYarnProperty('hadoop.registry.rm.enabled', 'false')
-    # recommend enabling RM and NM recovery if ZOOKEEPER in services
-    if "ZOOKEEPER" in servicesList:
-      putYarnProperty('yarn.resourcemanager.recovery.enabled', 'true')
-      putYarnProperty('yarn.nodemanager.recovery.enabled', 'true')
-    else:
-      putYarnProperty('yarn.resourcemanager.recovery.enabled', 'false')
-      putYarnProperty('yarn.nodemanager.recovery.enabled', 'false')
-      # recommend disabling RM HA if ZOOKEEPER is not in services
-      putYarnProperty('yarn.resourcemanager.ha.enabled', 'false')
 
   def recommendYARNConfigurationsFromHDP23(self, configurations, clusterData, services, hosts):
     putYarnSiteProperty = self.putProperty(configurations, "yarn-site", services)
@@ -1804,7 +1795,6 @@ class YARNValidator(service_advisor.ServiceAdvisor):
     self.as_super.__init__(*args, **kwargs)
 
     self.validators = [("yarn-site", self.validateYARNSiteConfigurationsFromHDP206),
-                       ("yarn-site", self.validateYARNSiteConfigurationsFromHDP22),
                        ("yarn-site", self.validateYARNSiteConfigurationsFromHDP25),
                        ("yarn-site" , self.validateYARNSiteConfigurationsFromHDP26),
                        ("yarn-env", self.validateYARNEnvConfigurationsFromHDP206),
@@ -1847,45 +1837,6 @@ class YARNValidator(service_advisor.ServiceAdvisor):
                         {"config-name": 'yarn.scheduler.maximum-allocation-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.scheduler.maximum-allocation-mb')} ]
     return self.toConfigurationValidationProblems(validationItems, "yarn-site")
 
-  def validateYARNSiteConfigurationsFromHDP22(self, properties, recommendedDefaults, configurations, services, hosts):
-    """
-    This was copied from HDP 2.2; validate yarn-site
-    :return: A list of configuration validation problems.
-    """
-    yarn_site = properties
-    validationItems = []
-    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
-
-    zk_hosts = self.getHostsForComponent(services, "ZOOKEEPER", "ZOOKEEPER_SERVER")
-    if len(zk_hosts) == 0:
-      # ZOOKEEPER_SERVER isn't assigned to at least one host
-      if 'yarn.resourcemanager.recovery.enabled' in yarn_site and \
-              'true' == yarn_site['yarn.resourcemanager.recovery.enabled']:
-        validationItems.append({"config-name": "yarn.resourcemanager.recovery.enabled",
-                                "item": self.getWarnItem(
-                                  "YARN resource manager recovery can only be enabled if ZOOKEEPER is installed.")})
-      if 'yarn.nodemanager.recovery.enabled' in yarn_site and \
-              'true' == yarn_site['yarn.nodemanager.recovery.enabled']:
-        validationItems.append({"config-name": "yarn.nodemanager.recovery.enabled",
-                                "item": self.getWarnItem(
-                                  "YARN node manager recovery can only be enabled if ZOOKEEPER is installed.")})
-
-    if len(zk_hosts) < 3:
-      if 'yarn.resourcemanager.ha.enabled' in yarn_site and \
-              'true' == yarn_site['yarn.resourcemanager.ha.enabled']:
-        validationItems.append({"config-name": "yarn.resourcemanager.ha.enabled",
-                                "item": self.getWarnItem(
-                                  "You must have at least 3 ZooKeeper Servers in your cluster to enable ResourceManager HA.")})
-
-    if 'ZOOKEEPER' not in servicesList or 'SLIDER' not in servicesList:
-      if 'hadoop.registry.rm.enabled' in yarn_site and \
-              'true' == yarn_site['hadoop.registry.rm.enabled']:
-        validationItems.append({"config-name": "hadoop.registry.rm.enabled",
-                                "item": self.getWarnItem(
-                                  "HADOOP resource manager registry can only be enabled if ZOOKEEPER and SLIDER are installed.")})
-
-    return self.toConfigurationValidationProblems(validationItems, "yarn-site")
-
   def validateYARNSiteConfigurationsFromHDP25(self, properties, recommendedDefaults, configurations, services, hosts):
     yarn_site_properties = self.getSiteProperties(configurations, "yarn-site")
     validationItems = []

http://git-wip-us.apache.org/repos/asf/ambari/blob/ebd79e98/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
index 54ddd89..726514b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
@@ -174,23 +174,12 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
           putYarnPropertyAttribute('yarn.nodemanager.linux-container-executor.cgroups.hierarchy', 'delete', 'true')
           putYarnPropertyAttribute('yarn.nodemanager.linux-container-executor.cgroups.mount', 'delete', 'true')
           putYarnPropertyAttribute('yarn.nodemanager.linux-container-executor.cgroups.mount-path', 'delete', 'true')
-    # recommend hadoop.registry.rm.enabled based on SLIDER and ZOOKEEPER in services
+    # recommend hadoop.registry.rm.enabled based on SLIDER in services
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
-    if "SLIDER" in servicesList and "ZOOKEEPER" in servicesList:
+    if "SLIDER" in servicesList:
       putYarnProperty('hadoop.registry.rm.enabled', 'true')
     else:
       putYarnProperty('hadoop.registry.rm.enabled', 'false')
-    # recommend enabling RM and NM recovery if ZOOKEEPER in services
-    if "ZOOKEEPER" in servicesList:
-      putYarnProperty('yarn.resourcemanager.recovery.enabled', 'true')
-      putYarnProperty('yarn.nodemanager.recovery.enabled', 'true')
-    else:
-      putYarnProperty('yarn.resourcemanager.recovery.enabled', 'false')
-      putYarnProperty('yarn.nodemanager.recovery.enabled', 'false')
-      # recommend disabling RM HA if ZOOKEEPER is not in services
-      putYarnProperty('yarn.resourcemanager.ha.enabled', 'false')
-
-
 
   def recommendHDFSConfigurations(self, configurations, clusterData, services, hosts):
     super(HDP22StackAdvisor, self).recommendHDFSConfigurations(configurations, clusterData, services, hosts)
@@ -1045,7 +1034,6 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
                "hadoop-env": self.validateHDFSConfigurationsEnv,
                "ranger-hdfs-plugin-properties": self.validateHDFSRangerPluginConfigurations},
       "YARN": {"yarn-env": self.validateYARNEnvConfigurations,
-               "yarn-site": self.validateYARNConfigurations,
                "ranger-yarn-plugin-properties": self.validateYARNRangerPluginConfigurations},
       "HIVE": {"hiveserver2-site": self.validateHiveServer2Configurations,
                "hive-site": self.validateHiveConfigurations,
@@ -1726,43 +1714,6 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
 
     return self.toConfigurationValidationProblems(validationItems, "ranger-storm-plugin-properties")
 
-  def validateYARNConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
-    parentValidationProblems = super(HDP22StackAdvisor, self).validateYARNConfigurations(properties, recommendedDefaults, configurations, services, hosts)
-    yarn_site = properties
-    validationItems = []
-    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
-    zk_hosts = self.getHostsForComponent(services, "ZOOKEEPER", "ZOOKEEPER_SERVER")
-    if len(zk_hosts) == 0:
-      # ZOOKEEPER_SERVER isn't assigned to at least one host
-      if 'yarn.resourcemanager.recovery.enabled' in yarn_site and \
-              'true' == yarn_site['yarn.resourcemanager.recovery.enabled']:
-        validationItems.append({"config-name": "yarn.resourcemanager.recovery.enabled",
-                                "item": self.getWarnItem(
-                                  "YARN resource manager recovery can only be enabled if ZOOKEEPER is installed.")})
-      if 'yarn.nodemanager.recovery.enabled' in yarn_site and \
-              'true' == yarn_site['yarn.nodemanager.recovery.enabled']:
-        validationItems.append({"config-name": "yarn.nodemanager.recovery.enabled",
-                                "item": self.getWarnItem(
-                                  "YARN node manager recovery can only be enabled if ZOOKEEPER is installed.")})
-
-    if len(zk_hosts) < 3:
-      if 'yarn.resourcemanager.ha.enabled' in yarn_site and \
-              'true' == yarn_site['yarn.resourcemanager.ha.enabled']:
-        validationItems.append({"config-name": "yarn.resourcemanager.ha.enabled",
-                                "item": self.getWarnItem(
-                                  "You must have at least 3 ZooKeeper Servers in your cluster to enable ResourceManager HA.")})
-
-    if 'ZOOKEEPER' not in servicesList or 'SLIDER' not in servicesList:
-      if 'hadoop.registry.rm.enabled' in yarn_site and \
-              'true' == yarn_site['hadoop.registry.rm.enabled']:
-        validationItems.append({"config-name": "hadoop.registry.rm.enabled",
-                                "item": self.getWarnItem(
-                                  "HADOOP resource manager registry can only be enabled if ZOOKEEPER and SLIDER are installed.")})
-
-    validationProblems = self.toConfigurationValidationProblems(validationItems, "yarn-site")
-    validationProblems.extend(parentValidationProblems)
-    return validationProblems
-
   def validateYARNEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
     parentValidationProblems = super(HDP22StackAdvisor, self).validateYARNEnvConfigurations(properties, recommendedDefaults, configurations, services, hosts)
     validationItems = []

http://git-wip-us.apache.org/repos/asf/ambari/blob/ebd79e98/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
index ee620b5..571ff26 100644
--- a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
@@ -906,62 +906,7 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.scheduler.maximum-allocation-vcores": "4",
           "yarn.scheduler.minimum-allocation-vcores": "1",
           "yarn.nodemanager.resource.cpu-vcores": "4",
-          "hadoop.registry.rm.enabled": "false",
-          "yarn.resourcemanager.recovery.enabled": "false",
-          "yarn.nodemanager.recovery.enabled": "false",
-          "yarn.resourcemanager.ha.enabled": "false"
-        }
-      }
-    }
-
-    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, None)
-    self.assertEquals(configurations, expected)
-
-  def test_recommendYARNConfigurationsWithZKAndSlider(self):
-    configurations = {}
-    services = {"configurations": configurations}
-    services['services'] = [
-      {
-        "StackServices": {
-          "service_name": "ZOOKEEPER"
-        },
-        },
-      {
-        "StackServices": {
-          "service_name": "YARN"
-        },
-        },
-      {
-        "StackServices": {
-          "service_name": "SLIDER"
-        },
-        }
-    ]
-    clusterData = {
-      "cpu": 4,
-      "containers" : 5,
-      "ramPerContainer": 256,
-      "yarnMinContainerSize": 256
-    }
-    expected = {
-      "yarn-env": {
-        "properties": {
-          "min_user_id": "500",
-          'service_check.queue.name': 'default'
-        }
-      },
-      "yarn-site": {
-        "properties": {
-          "yarn.nodemanager.linux-container-executor.group": "hadoop",
-          "yarn.nodemanager.resource.memory-mb": "1280",
-          "yarn.scheduler.minimum-allocation-mb": "256",
-          "yarn.scheduler.maximum-allocation-mb": "1280",
-          "yarn.scheduler.maximum-allocation-vcores": "4",
-          "yarn.scheduler.minimum-allocation-vcores": "1",
-          "yarn.nodemanager.resource.cpu-vcores": "4",
-          "hadoop.registry.rm.enabled": "true",
-          "yarn.resourcemanager.recovery.enabled": "true",
-          "yarn.nodemanager.recovery.enabled": "true"
+          "hadoop.registry.rm.enabled": "true"
         }
       }
     }
@@ -969,55 +914,6 @@ class TestHDP22StackAdvisor(TestCase):
     self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, None)
     self.assertEquals(configurations, expected)
 
-  def test_recommendYARNConfigurationsWithZK(self):
-    configurations = {}
-    services = {"configurations": configurations}
-    services['services'] = [
-      {
-        "StackServices": {
-          "service_name": "ZOOKEEPER"
-        },
-        },
-      {
-        "StackServices": {
-          "service_name": "YARN"
-        },
-        }
-    ]
-    clusterData = {
-      "cpu": 4,
-      "containers" : 5,
-      "ramPerContainer": 256,
-      "yarnMinContainerSize": 256
-    }
-    expected = {
-      "yarn-env": {
-        "properties": {
-          "min_user_id": "500",
-          'service_check.queue.name': 'default'
-        }
-      },
-      "yarn-site": {
-        "properties": {
-          "yarn.nodemanager.linux-container-executor.group": "hadoop",
-          "yarn.nodemanager.resource.memory-mb": "1280",
-          "yarn.scheduler.minimum-allocation-mb": "256",
-          "yarn.scheduler.maximum-allocation-mb": "1280",
-          "yarn.scheduler.maximum-allocation-vcores": "4",
-          "yarn.scheduler.minimum-allocation-vcores": "1",
-          "yarn.nodemanager.resource.cpu-vcores": "4",
-          "hadoop.registry.rm.enabled": "false",
-          "yarn.resourcemanager.recovery.enabled": "true",
-          "yarn.nodemanager.recovery.enabled": "true"
-        }
-      }
-    }
-
-    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, None)
-    self.assertEquals(configurations, expected)
-
-
-
   def test_recommendSPARKConfigurations(self):
     configurations = {}
     services = {"configurations": configurations}
@@ -1083,10 +979,7 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.scheduler.minimum-allocation-vcores": "1",
           "yarn.scheduler.maximum-allocation-mb": "1280",
           "yarn.nodemanager.resource.cpu-vcores": "2",
-          "hadoop.registry.rm.enabled": "false",
-          "yarn.resourcemanager.recovery.enabled": "false",
-          "yarn.nodemanager.recovery.enabled": "false",
-          "yarn.resourcemanager.ha.enabled": "false"
+          "hadoop.registry.rm.enabled": "false"
         },
         "property_attributes": {
           'yarn.nodemanager.resource.memory-mb': {'maximum': '1877'},
@@ -1913,10 +1806,7 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.scheduler.minimum-allocation-vcores": "1",
           "yarn.scheduler.maximum-allocation-mb": "1792",
           "yarn.nodemanager.resource.cpu-vcores": "1",
-          "hadoop.registry.rm.enabled": "false",
-          "yarn.resourcemanager.recovery.enabled": "false",
-          "yarn.nodemanager.recovery.enabled": "false",
-          "yarn.resourcemanager.ha.enabled": "false"
+          "hadoop.registry.rm.enabled": "false"
         },
         "property_attributes": {
           'yarn.nodemanager.resource.memory-mb': {'maximum': '1877'},
@@ -2180,10 +2070,7 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.scheduler.minimum-allocation-vcores": "1",
           "yarn.scheduler.maximum-allocation-mb": "1280",
           "yarn.nodemanager.resource.cpu-vcores": "1",
-          "hadoop.registry.rm.enabled": "false",
-          "yarn.resourcemanager.recovery.enabled": "false",
-          "yarn.nodemanager.recovery.enabled": "false",
-          "yarn.resourcemanager.ha.enabled": "false"
+          "hadoop.registry.rm.enabled": "false"
         },
         "property_attributes": {
           'yarn.nodemanager.resource.memory-mb': {'maximum': '1877'},
@@ -2398,10 +2285,7 @@ class TestHDP22StackAdvisor(TestCase):
                 "yarn.scheduler.minimum-allocation-vcores": "1",
                 "yarn.scheduler.maximum-allocation-mb": "1280",
                 "yarn.nodemanager.resource.cpu-vcores": "1",
-                "hadoop.registry.rm.enabled": "false",
-                "yarn.resourcemanager.recovery.enabled": "false",
-                "yarn.nodemanager.recovery.enabled": "false",
-                "yarn.resourcemanager.ha.enabled": "false"
+                "hadoop.registry.rm.enabled": "false"
             },
             "property_attributes": {
                 'yarn.nodemanager.resource.memory-mb': {'maximum': '1877'},
@@ -3960,9 +3844,6 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.scheduler.maximum-allocation-mb": "33792",
           "yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler",
           "hadoop.registry.rm.enabled": "false",
-          "yarn.resourcemanager.recovery.enabled": "false",
-          "yarn.nodemanager.recovery.enabled": "false",
-          "yarn.resourcemanager.ha.enabled": "false",
           "yarn.timeline-service.leveldb-state-store.path": "/hadoop/yarn/timeline",
           "yarn.timeline-service.leveldb-timeline-store.path": "/hadoop/yarn/timeline",
           "yarn.nodemanager.local-dirs": "/hadoop/yarn/local,/dev/shm/hadoop/yarn/local,/vagrant/hadoop/yarn/local",
@@ -4022,9 +3903,6 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.scheduler.maximum-allocation-mb": "33792",
           "yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler",
           "hadoop.registry.rm.enabled": "false",
-          "yarn.resourcemanager.recovery.enabled": "false",
-          "yarn.nodemanager.recovery.enabled": "false",
-          "yarn.resourcemanager.ha.enabled": "false",
           "yarn.timeline-service.leveldb-state-store.path": "/hadoop/yarn/timeline",
           "yarn.timeline-service.leveldb-timeline-store.path": "/hadoop/yarn/timeline",
           "yarn.nodemanager.local-dirs": "/hadoop/yarn/local,/dev/shm/hadoop/yarn/local,/vagrant/hadoop/yarn/local",

http://git-wip-us.apache.org/repos/asf/ambari/blob/ebd79e98/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
index 96a595f..d4d28c9 100644
--- a/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
@@ -1153,9 +1153,6 @@ class TestHDP26StackAdvisor(TestCase):
       'yarn-site': {
         'properties': {
           'hadoop.registry.rm.enabled': 'false',
-          'yarn.resourcemanager.recovery.enabled': 'false',
-          'yarn.nodemanager.recovery.enabled': 'false',
-          'yarn.resourcemanager.ha.enabled': 'false',
           'yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes': '',
           'yarn.scheduler.minimum-allocation-vcores': '1',
           'yarn.scheduler.maximum-allocation-vcores': '4',
@@ -1332,9 +1329,6 @@ class TestHDP26StackAdvisor(TestCase):
       'yarn-site': {
         'properties': {
           'hadoop.registry.rm.enabled': 'false',
-          'yarn.resourcemanager.recovery.enabled': 'false',
-          'yarn.nodemanager.recovery.enabled': 'false',
-          'yarn.resourcemanager.ha.enabled': 'false',
           'yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes': '',
           'yarn.authorization-provider': 'org.apache.ranger.authorization.yarn.authorizer.RangerYarnAuthorizer',
           'yarn.acl.enable': 'true',
@@ -1442,9 +1436,6 @@ class TestHDP26StackAdvisor(TestCase):
       'yarn-site': {
         'properties': {
           'hadoop.registry.rm.enabled': 'false',
-          'yarn.resourcemanager.recovery.enabled': 'false',
-          'yarn.nodemanager.recovery.enabled': 'false',
-          'yarn.resourcemanager.ha.enabled': 'false',
           'yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes': '',
           'yarn.authorization-provider': 'org.apache.ranger.authorization.yarn.authorizer.RangerYarnAuthorizer',
           'yarn.acl.enable': 'true',


[30/33] ambari git commit: AMBARI-21370: Support VIPs instead of Host Names -- fix imports

Posted by rl...@apache.org.
AMBARI-21370: Support VIPs instead of Host Names -- fix imports


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/aa7a8c65
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/aa7a8c65
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/aa7a8c65

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: aa7a8c657f07caa0be3db89d1e8146978d7d438c
Parents: 4d7cc7f
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Thu Jun 29 19:02:45 2017 +0200
Committer: Attila Doroszlai <ad...@hortonworks.com>
Committed: Thu Jun 29 19:02:45 2017 +0200

----------------------------------------------------------------------
 .../org/apache/ambari/server/controller/jmx/JMXHostProvider.java | 4 +---
 .../apache/ambari/server/controller/jmx/JMXPropertyProvider.java | 1 -
 2 files changed, 1 insertion(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/aa7a8c65/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXHostProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXHostProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXHostProvider.java
index dbf8eb7..4e48b53 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXHostProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXHostProvider.java
@@ -17,12 +17,10 @@
  */
 package org.apache.ambari.server.controller.jmx;
 
-import org.apache.ambari.server.controller.spi.SystemException;
-import org.apache.ambari.server.state.Host;
-
 import java.util.Set;
 
 import org.apache.ambari.server.controller.spi.SystemException;
+import org.apache.ambari.server.state.Host;
 
 /**
  * Provider of JMX host information.

http://git-wip-us.apache.org/repos/asf/ambari/blob/aa7a8c65/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXPropertyProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXPropertyProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXPropertyProvider.java
index e4de377..832d9ae 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXPropertyProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXPropertyProvider.java
@@ -40,7 +40,6 @@ import org.apache.ambari.server.controller.spi.Request;
 import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.SystemException;
 import org.apache.ambari.server.controller.utilities.StreamProvider;
-import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.services.MetricsRetrievalService;
 import org.apache.ambari.server.state.services.MetricsRetrievalService.MetricSourceType;
 import org.slf4j.Logger;


[32/33] ambari git commit: AMBARI-21369. Use JDK 8 compiler maven plugin for Log Search and Infra projects (oleewere)

Posted by rl...@apache.org.
AMBARI-21369. Use JDK 8 compiler maven plugin for Log Search and Infra projects (oleewere)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d7c59fca
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d7c59fca
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d7c59fca

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: d7c59fca19770a8fb0e488371d2f460673f7e3d4
Parents: 7554509
Author: oleewere <ol...@gmail.com>
Authored: Fri Jun 30 11:33:21 2017 +0200
Committer: oleewere <ol...@gmail.com>
Committed: Fri Jun 30 11:35:08 2017 +0200

----------------------------------------------------------------------
 ambari-infra/ambari-infra-manager/pom.xml           |  1 -
 ambari-infra/ambari-infra-solr-plugin/pom.xml       |  4 ++--
 ambari-infra/pom.xml                                |  2 +-
 ambari-logsearch/ambari-logsearch-logfeeder/pom.xml |  4 ++--
 ambari-logsearch/ambari-logsearch-server/pom.xml    | 12 ++++++------
 ambari-logsearch/pom.xml                            |  5 +++--
 6 files changed, 14 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/d7c59fca/ambari-infra/ambari-infra-manager/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/pom.xml b/ambari-infra/ambari-infra-manager/pom.xml
index b7708c2..b9f2a77 100644
--- a/ambari-infra/ambari-infra-manager/pom.xml
+++ b/ambari-infra/ambari-infra-manager/pom.xml
@@ -38,7 +38,6 @@
     <spring-data-solr.version>2.0.2.RELEASE</spring-data-solr.version>
     <jjwt.version>0.6.0</jjwt.version>
     <spring-batch.version>3.0.7.RELEASE</spring-batch.version>
-    <jdk.version>1.7</jdk.version>
     <sqlite.version>3.8.11.2</sqlite.version>
   </properties>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7c59fca/ambari-infra/ambari-infra-solr-plugin/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-solr-plugin/pom.xml b/ambari-infra/ambari-infra-solr-plugin/pom.xml
index c890cec..3337d99 100644
--- a/ambari-infra/ambari-infra-solr-plugin/pom.xml
+++ b/ambari-infra/ambari-infra-solr-plugin/pom.xml
@@ -47,8 +47,8 @@
         <artifactId>maven-compiler-plugin</artifactId>
         <version>3.3</version>
         <configuration>
-          <source>1.7</source>
-          <target>1.7</target>
+          <source>${jdk.version}</source>
+          <target>${jdk.version}</target>
         </configuration>
       </plugin>
     </plugins>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7c59fca/ambari-infra/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-infra/pom.xml b/ambari-infra/pom.xml
index 07adb3e..4f5c29c 100644
--- a/ambari-infra/pom.xml
+++ b/ambari-infra/pom.xml
@@ -30,7 +30,7 @@
   <packaging>pom</packaging>
 
   <properties>
-    <jdk.version>1.7</jdk.version>
+    <jdk.version>1.8</jdk.version>
     <solr.version>5.5.2</solr.version>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
     <python.ver>python &gt;= 2.6</python.ver>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7c59fca/ambari-logsearch/ambari-logsearch-logfeeder/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/pom.xml b/ambari-logsearch/ambari-logsearch-logfeeder/pom.xml
index ae2150e..091f957 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/pom.xml
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/pom.xml
@@ -197,8 +197,8 @@
         <artifactId>maven-compiler-plugin</artifactId>
         <version>3.3</version>
         <configuration>
-          <source>1.7</source>
-          <target>1.7</target>
+          <source>${jdk.version}</source>
+          <target>${jdk.version}</target>
         </configuration>
       </plugin>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7c59fca/ambari-logsearch/ambari-logsearch-server/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/pom.xml b/ambari-logsearch/ambari-logsearch-server/pom.xml
index fc4029b..ebca2d5 100755
--- a/ambari-logsearch/ambari-logsearch-server/pom.xml
+++ b/ambari-logsearch/ambari-logsearch-server/pom.xml
@@ -63,8 +63,8 @@
             <artifactId>maven-compiler-plugin</artifactId>
             <version>3.0</version>
             <configuration>
-              <source>1.7</source>
-              <target>1.7</target>
+              <source>${jdk.version}</source>
+              <target>${jdk.version}</target>
             </configuration>
           </plugin>
           <!-- Exec main class plugin -->
@@ -196,8 +196,8 @@
             <artifactId>maven-compiler-plugin</artifactId>
             <version>3.0</version>
             <configuration>
-              <source>1.7</source>
-              <target>1.7</target>
+              <source>${jdk.version}</source>
+              <target>${jdk.version}</target>
             </configuration>
           </plugin>
           <!-- Exec main class plugin -->
@@ -337,8 +337,8 @@
             <artifactId>maven-compiler-plugin</artifactId>
             <version>3.0</version>
             <configuration>
-              <source>1.7</source>
-              <target>1.7</target>
+              <source>${jdk.version}</source>
+              <target>${jdk.version}</target>
             </configuration>
           </plugin>
           <!-- Exec main class plugin -->

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7c59fca/ambari-logsearch/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/pom.xml b/ambari-logsearch/pom.xml
index af1dc6a..82943e4 100644
--- a/ambari-logsearch/pom.xml
+++ b/ambari-logsearch/pom.xml
@@ -39,6 +39,7 @@
     <module>ambari-logsearch-it</module>
   </modules>
   <properties>
+    <jdk.version>1.8</jdk.version>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
     <python.ver>python &gt;= 2.6</python.ver>
     <deb.python.ver>python (&gt;= 2.6)</deb.python.ver>
@@ -82,8 +83,8 @@
         <artifactId>maven-compiler-plugin</artifactId>
         <version>3.2</version>
         <configuration>
-          <source>1.7</source>
-          <target>1.7</target>
+          <source>${jdk.version}</source>
+          <target>${jdk.version}</target>
         </configuration>
       </plugin>
       <plugin>


[24/33] ambari git commit: AMBARI-21099. Drop JDK 7 support for Ambari Server and Ambari managed services (AMS, LogSearch, Infra) (oleewere)

Posted by rl...@apache.org.
AMBARI-21099. Drop JDK 7 support for Ambari Server and Ambari managed services (AMS, LogSearch, Infra) (oleewere)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2f0de691
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2f0de691
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2f0de691

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 2f0de6919ba8bb43ae156d66b39335f9860d26b9
Parents: f4fb174
Author: oleewere <ol...@gmail.com>
Authored: Wed Jun 28 21:15:48 2017 +0200
Committer: oleewere <ol...@gmail.com>
Committed: Wed Jun 28 21:16:11 2017 +0200

----------------------------------------------------------------------
 ambari-server/docs/configuration/index.md       |  13 ++-
 .../ambari/server/agent/ExecutionCommand.java   |   4 +
 .../server/configuration/Configuration.java     |  65 ++++++++++--
 .../controller/AmbariActionExecutionHelper.java |   2 +
 .../AmbariCustomCommandExecutionHelper.java     |  12 +--
 .../AmbariManagementControllerImpl.java         |   2 +
 .../internal/ClientConfigResourceProvider.java  |   9 +-
 .../apache/ambari/server/utils/StageUtils.java  |  54 ++++++++++
 ambari-server/src/main/python/ambari-server.py  |   2 +
 .../python/ambari_server/serverConfiguration.py |   6 ++
 .../main/python/ambari_server/serverSetup.py    | 105 +++++++++++++++++--
 .../0.1.0/package/scripts/params.py             |   4 +-
 .../0.1.0/package/scripts/params.py             |  10 +-
 .../ATLAS/0.1.0.2.3/package/scripts/metadata.py |   8 +-
 .../ATLAS/0.1.0.2.3/package/scripts/params.py   |   1 +
 .../LOGSEARCH/0.5.0/package/scripts/params.py   |   4 +-
 .../RANGER/0.4.0/package/scripts/params.py      |   1 +
 .../0.4.0/package/scripts/setup_ranger_xml.py   |  10 +-
 .../2.0.6/hooks/before-ANY/scripts/params.py    |   2 +
 .../before-ANY/scripts/shared_initialization.py |  30 ++++--
 .../2.0.6/hooks/before-START/scripts/params.py  |   4 +
 .../scripts/shared_initialization.py            |  22 ++--
 .../HDP/3.0/hooks/before-ANY/scripts/params.py  |   3 +
 .../before-ANY/scripts/shared_initialization.py |  31 ++++--
 .../3.0/hooks/before-START/scripts/params.py    |   4 +
 .../scripts/shared_initialization.py            |  22 ++--
 .../main/resources/stacks/HDP/3.0/metainfo.xml  |   2 +-
 .../PERF/1.0/hooks/before-ANY/scripts/params.py |   3 +
 .../before-ANY/scripts/shared_initialization.py |  23 +++-
 .../ClientConfigResourceProviderTest.java       |   8 ++
 .../ambari/server/utils/StageUtilsTest.java     |  99 +++++++++++++++++
 .../src/test/python/TestAmbariServer.py         |  49 ++++++++-
 32 files changed, 524 insertions(+), 90 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/docs/configuration/index.md
----------------------------------------------------------------------
diff --git a/ambari-server/docs/configuration/index.md b/ambari-server/docs/configuration/index.md
index 2394264..9dbe9c4 100644
--- a/ambari-server/docs/configuration/index.md
+++ b/ambari-server/docs/configuration/index.md
@@ -142,9 +142,9 @@ The following are the properties which can be used to configure Ambari.
 | http.x-content-type-options | The value that will be used to set the `X-CONTENT-TYPE` HTTP response header. |`nosniff` | 
 | http.x-frame-options | The value that will be used to set the `X-Frame-Options` HTTP response header. |`DENY` | 
 | http.x-xss-protection | The value that will be used to set the `X-XSS-Protection` HTTP response header. |`1; mode=block` | 
-| java.home | The location of the JDK on the Ambari Agent hosts.<br/><br/>The following are examples of valid values:<ul><li>`/usr/jdk64/jdk1.7.0_45`</ul> | | 
-| jce.name | The name of the JCE policy ZIP file. <br/><br/>The following are examples of valid values:<ul><li>`UnlimitedJCEPolicyJDK7.zip`</ul> | | 
-| jdk.name | The name of the JDK installation binary.<br/><br/>The following are examples of valid values:<ul><li>`jdk-7u45-linux-x64.tar.gz`</ul> | | 
+| java.home | The location of the JDK on the Ambari Agent hosts. If stack.java.home exists, that is only used by Ambari Server (or you can find that as ambari_java_home in the commandParams on the agent side)<br/><br/>The following are examples of valid values:<ul><li>`/usr/jdk64/jdk1.8.0_112`</ul> | | 
+| jce.name | The name of the JCE policy ZIP file. If stack.jce.name exists, that is only used by Ambari Server (or you can find that as ambari_jce_name in the commandParams on the agent side)<br/><br/>The following are examples of valid values:<ul><li>`UnlimitedJCEPolicyJDK8.zip`</ul> | | 
+| jdk.name | The name of the JDK installation binary. If stack.jdk.name exists, that is only used by Ambari Server (or you can find that as ambari_jdk_name in the commandParams on the agent side)<br/><br/>The following are examples of valid values:<ul><li>`jdk-8u112-linux-x64.tar.gz`</ul> | | 
 | kdcserver.connection.check.timeout | The timeout, in milliseconds, to wait when communicating with a Kerberos Key Distribution Center. |`10000` | 
 | kerberos.check.jaas.configuration | Determines whether Kerberos-enabled Ambari deployments should use JAAS to validate login credentials. |`false` | 
 | kerberos.keytab.cache.dir | The location on the Ambari Server where Kerberos keytabs are cached. |`/var/lib/ambari-server/data/cache` | 
@@ -161,6 +161,7 @@ The following are the properties which can be used to configure Ambari.
 | metrics.retrieval-service.request.ttl | The number of seconds to wait between issuing JMX or REST metric requests to the same endpoint. This property is used to throttle requests to the same URL being made too close together<br/><br/> This property is related to `metrics.retrieval-service.request.ttl.enabled`. |`5` | 
 | metrics.retrieval-service.request.ttl.enabled | Enables throttling requests to the same endpoint within a fixed amount of time. This property will prevent Ambari from making new metric requests to update the cache for URLs which have been recently retrieved.<br/><br/> This property is related to `metrics.retrieval-service.request.ttl`. |`true` | 
 | mpacks.staging.path | The Ambari Management Pack staging directory on the Ambari Server.<br/><br/>The following are examples of valid values:<ul><li>`/var/lib/ambari-server/resources/mpacks`</ul> | | 
+| notification.dispatch.alert.script.directory | The directory for scripts which are used by the alert notification dispatcher. |`/var/lib/ambari-server/resources/scripts` | 
 | packages.pre.installed | Determines whether Ambari Agent instances have already have the necessary stack software installed |`false` | 
 | pam.configuration | The PAM configuration file. | | 
 | property.mask.file | The path of the file which lists the properties that should be masked from the api that returns ambari.properties | | 
@@ -209,6 +210,7 @@ The following are the properties which can be used to configure Ambari.
 | server.ecCacheSize | The size of the cache which is used to hold current operations in memory until they complete. |`10000` | 
 | server.execution.scheduler.isClustered | Determines whether Quartz will use a clustered job scheduled when performing scheduled actions like rolling restarts. |`false` | 
 | server.execution.scheduler.maxDbConnections | The number of concurrent database connections that the Quartz job scheduler can use. |`5` | 
+| server.execution.scheduler.maxStatementsPerConnection | The maximum number of prepared statements cached per database connection. |`120` | 
 | server.execution.scheduler.maxThreads | The number of threads that the Quartz job scheduler will use when executing scheduled jobs. |`5` | 
 | server.execution.scheduler.misfire.toleration.minutes | The time, in minutes, that a scheduled job can be run after its missed scheduled execution time. |`480` | 
 | server.execution.scheduler.start.delay.seconds | The delay, in seconds, that a Quartz job must wait before it starts. |`120` | 
@@ -280,6 +282,10 @@ The following are the properties which can be used to configure Ambari.
 | ssl.trustStore.password | The password to use when setting the `javax.net.ssl.trustStorePassword` property | | 
 | ssl.trustStore.path | The location of the truststore to use when setting the `javax.net.ssl.trustStore` property. | | 
 | ssl.trustStore.type | The type of truststore used by the `javax.net.ssl.trustStoreType` property. | | 
+| stack.java.home | The location of the JDK on the Ambari Agent hosts for stack services.<br/><br/>The following are examples of valid values:<ul><li>`/usr/jdk64/jdk1.7.0_45`</ul> | | 
+| stack.java.version | JDK version of the stack, use in case of it differs from Ambari JDK version.<br/><br/>The following are examples of valid values:<ul><li>`1.7`</ul> | | 
+| stack.jce.name | The name of the JCE policy ZIP file for stack services.<br/><br/>The following are examples of valid values:<ul><li>`UnlimitedJCEPolicyJDK7.zip`</ul> | | 
+| stack.jdk.name | The name of the JDK installation binary for stack services.<br/><br/>The following are examples of valid values:<ul><li>`jdk-7u45-linux-x64.tar.gz`</ul> | | 
 | stack.upgrade.auto.retry.check.interval.secs | The amount of time to wait, in seconds, between checking for upgrade tasks to be retried. This value is only applicable if `stack.upgrade.auto.retry.timeout.mins` is positive.<br/><br/> This property is related to `stack.upgrade.auto.retry.timeout.mins`. |`20` | 
 | stack.upgrade.auto.retry.command.details.to.ignore | A comma-separate list of upgrade tasks details to skip when retrying failed commands automatically. |`"Execute HDFS Finalize"` | 
 | stack.upgrade.auto.retry.command.names.to.ignore | A comma-separate list of upgrade tasks names to skip when retrying failed commands automatically. |`"ComponentVersionCheckAction","FinalizeUpgradeAction"` | 
@@ -298,6 +304,7 @@ The following are the properties which can be used to configure Ambari.
 | views.ambari.request.connect.timeout.millis | The amount of time, in milliseconds, that a view will wait when trying to connect on HTTP(S) operations to the Ambari REST API. |`30000` | 
 | views.ambari.request.read.timeout.millis | The amount of time, in milliseconds, that a view will wait before terminating an HTTP(S) read request to the Ambari REST API. |`45000` | 
 | views.dir | The directory on the Ambari Server file system used for expanding Views and storing webapp work. |`/var/lib/ambari-server/resources/views` | 
+| views.directory.watcher.disable | Determines whether the view directory watcher service should be disabled. |`false` | 
 | views.http.cache-control | The value that will be used to set the `Cache-Control` HTTP response header for Ambari View requests. |`no-store` | 
 | views.http.charset | The value that will be used to set the Character encoding to HTTP response header for Ambari View requests. |`utf-8` | 
 | views.http.pragma | The value that will be used to set the `PRAGMA` HTTP response header for Ambari View requests. |`no-cache` | 

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
index 60df8cf..29d28da 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
@@ -428,6 +428,10 @@ public class ExecutionCommand extends AgentCommand {
     String PACKAGE_LIST = "package_list";
     String JDK_LOCATION = "jdk_location";
     String JAVA_HOME = "java_home";
+    String AMBARI_JAVA_HOME = "ambari_java_home";
+    String AMBARI_JDK_NAME = "ambari_jdk_name";
+    String AMBARI_JCE_NAME = "ambari_jce_name";
+    String AMBARI_JAVA_VERSION = "ambari_java_version";
     String JAVA_VERSION = "java_version";
     String JDK_NAME = "jdk_name";
     String JCE_NAME = "jce_name";

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
index fb06e6d..28f9d64 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
@@ -733,8 +733,8 @@ public class Configuration {
    * The location of the JDK on the Ambari Agent hosts.
    */
   @Markdown(
-      description = "The location of the JDK on the Ambari Agent hosts.",
-      examples = { "/usr/jdk64/jdk1.7.0_45" })
+      description = "The location of the JDK on the Ambari Agent hosts. If stack.java.home exists, that is only used by Ambari Server (or you can find that as ambari_java_home in the commandParams on the agent side)",
+      examples = { "/usr/jdk64/jdk1.8.0_112" })
   public static final ConfigurationProperty<String> JAVA_HOME = new ConfigurationProperty<>(
       "java.home", null);
 
@@ -742,8 +742,8 @@ public class Configuration {
    * The name of the JDK installation binary.
    */
   @Markdown(
-      description = "The name of the JDK installation binary.",
-      examples = { "jdk-7u45-linux-x64.tar.gz" })
+      description = "The name of the JDK installation binary. If stack.jdk.name exists, that is only used by Ambari Server (or you can find that as ambari_jdk_name in the commandParams on the agent side)",
+      examples = { "jdk-8u112-linux-x64.tar.gz" })
   public static final ConfigurationProperty<String> JDK_NAME = new ConfigurationProperty<>(
       "jdk.name", null);
 
@@ -751,12 +751,48 @@ public class Configuration {
    * The name of the JCE policy ZIP file.
    */
   @Markdown(
-      description = "The name of the JCE policy ZIP file. ",
-      examples = {"UnlimitedJCEPolicyJDK7.zip"})
+      description = "The name of the JCE policy ZIP file. If stack.jce.name exists, that is only used by Ambari Server (or you can find that as ambari_jce_name in the commandParams on the agent side)",
+      examples = {"UnlimitedJCEPolicyJDK8.zip"})
   public static final ConfigurationProperty<String> JCE_NAME = new ConfigurationProperty<>(
       "jce.name", null);
 
   /**
+   * The location of the JDK on the Ambari Agent hosts.
+   */
+  @Markdown(
+    description = "The location of the JDK on the Ambari Agent hosts for stack services.",
+    examples = { "/usr/jdk64/jdk1.7.0_45" })
+  public static final ConfigurationProperty<String> STACK_JAVA_HOME = new ConfigurationProperty<>(
+    "stack.java.home", null);
+
+  /**
+   * The name of the JDK installation binary.
+   */
+  @Markdown(
+    description = "The name of the JDK installation binary for stack services.",
+    examples = { "jdk-7u45-linux-x64.tar.gz" })
+  public static final ConfigurationProperty<String> STACK_JDK_NAME = new ConfigurationProperty<>(
+    "stack.jdk.name", null);
+
+  /**
+   * The name of the JCE policy ZIP file.
+   */
+  @Markdown(
+    description = "The name of the JCE policy ZIP file for stack services.",
+    examples = {"UnlimitedJCEPolicyJDK7.zip"})
+  public static final ConfigurationProperty<String> STACK_JCE_NAME = new ConfigurationProperty<>(
+    "stack.jce.name", null);
+
+  /**
+   * Java version of the stack
+   */
+  @Markdown(
+    description = "JDK version of the stack, use in case of it differs from Ambari JDK version.",
+    examples = {"1.7"})
+  public static final ConfigurationProperty<String> STACK_JAVA_VERSION = new ConfigurationProperty<>(
+    "stack.java.version", null);
+
+  /**
    * The auto group creation by Ambari.
    */
   @Markdown(
@@ -4128,6 +4164,23 @@ public class Configuration {
   public String getJCEName() {
     return getProperty(JCE_NAME);
   }
+
+  public String getStackJavaHome() {
+    return getProperty(STACK_JAVA_HOME);
+  }
+
+  public String getStackJDKName() {
+    return getProperty(STACK_JDK_NAME);
+  }
+
+  public String getStackJCEName() {
+    return getProperty(STACK_JCE_NAME);
+  }
+
+  public String getStackJavaVersion() {
+    return getProperty(STACK_JAVA_VERSION);
+  }
+
   public String getAmbariBlacklistFile() {
     return getProperty(PROPERTY_MASK_FILE);
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
index 1b0e0e0..8f522b0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
@@ -60,6 +60,7 @@ import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostOpInProgressEvent;
 import org.apache.ambari.server.utils.SecretReference;
+import org.apache.ambari.server.utils.StageUtils;
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -418,6 +419,7 @@ public class AmbariActionExecutionHelper {
 
       commandParams.put(SCRIPT, actionName + ".py");
       commandParams.put(SCRIPT_TYPE, TYPE_PYTHON);
+      StageUtils.useAmbariJdkInCommandParams(commandParams, configs);
 
       ExecutionCommand execCmd = stage.getExecutionCommandWrapper(hostName,
         actionContext.getActionName()).getExecutionCommand();

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
index 0473690..28aa4e4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
@@ -29,11 +29,7 @@ import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.DB_NAME;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.GROUP_LIST;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOOKS_FOLDER;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOST_SYS_PREPPED;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JAVA_HOME;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JAVA_VERSION;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JCE_NAME;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JDK_LOCATION;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JDK_NAME;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.MYSQL_JDBC_URL;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.NOT_MANAGED_HDFS_PATH_LIST;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.ORACLE_JDBC_URL;
@@ -508,7 +504,7 @@ public class AmbariCustomCommandExecutionHelper {
       if (isUpgradeSuspended) {
         cluster.addSuspendedUpgradeParameters(commandParams, roleParams);
       }
-
+      StageUtils.useAmbariJdkInCommandParams(commandParams, configs);
       roleParams.put(COMPONENT_CATEGORY, componentInfo.getCategory());
 
       execCmd.setCommandParams(commandParams);
@@ -815,6 +811,7 @@ public class AmbariCustomCommandExecutionHelper {
     }
     commandParams.put(SERVICE_PACKAGE_FOLDER, serviceInfo.getServicePackageFolder());
     commandParams.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());
+    StageUtils.useAmbariJdkInCommandParams(commandParams, configs);
 
     execCmd.setCommandParams(commandParams);
 
@@ -1481,11 +1478,8 @@ public class AmbariCustomCommandExecutionHelper {
   Map<String, String> createDefaultHostParams(Cluster cluster, StackId stackId) throws AmbariException {
 
     TreeMap<String, String> hostLevelParams = new TreeMap<>();
+    StageUtils.useStackJdkIfExists(hostLevelParams, configs);
     hostLevelParams.put(JDK_LOCATION, managementController.getJdkResourceUrl());
-    hostLevelParams.put(JAVA_HOME, managementController.getJavaHome());
-    hostLevelParams.put(JAVA_VERSION, String.valueOf(configs.getJavaVersion()));
-    hostLevelParams.put(JDK_NAME, managementController.getJDKName());
-    hostLevelParams.put(JCE_NAME, managementController.getJCEName());
     hostLevelParams.put(STACK_NAME, stackId.getStackName());
     hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
     hostLevelParams.put(DB_NAME, managementController.getServerDB());

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 77883e3..5639dc1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -245,6 +245,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
   private static final String BASE_LOG_DIR = "/tmp/ambari";
 
   private static final String PASSWORD = "password";
+
   public static final String SKIP_INSTALL_FOR_COMPONENTS = "skipInstallForComponents";
   public static final String DONT_SKIP_INSTALL_FOR_COMPONENTS = "dontSkipInstallForComponents";
 
@@ -2473,6 +2474,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     if (customCommandExecutionHelper.isTopologyRefreshRequired(roleCommand.name(), clusterName, serviceName)) {
       commandParams.put(ExecutionCommand.KeyNames.REFRESH_TOPOLOGY, "True");
     }
+    StageUtils.useAmbariJdkInCommandParams(commandParams, configs);
 
     String repoInfo = customCommandExecutionHelper.getRepoInfo(cluster, component, host);
     if (LOG.isDebugEnabled()) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
index 21cf16c..bd445eb 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
@@ -23,11 +23,7 @@ import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AGENT_STA
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.DB_NAME;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.GROUP_LIST;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOST_SYS_PREPPED;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JAVA_HOME;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JAVA_VERSION;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JCE_NAME;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JDK_LOCATION;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JDK_NAME;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.MYSQL_JDBC_URL;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.NOT_MANAGED_HDFS_PATH_LIST;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.ORACLE_JDBC_URL;
@@ -363,11 +359,8 @@ public class ClientConfigResourceProvider extends AbstractControllerResourceProv
         osFamily = clusters.getHost(hostName).getOsFamily();
 
         TreeMap<String, String> hostLevelParams = new TreeMap<>();
+        StageUtils.useStackJdkIfExists(hostLevelParams, configs);
         hostLevelParams.put(JDK_LOCATION, managementController.getJdkResourceUrl());
-        hostLevelParams.put(JAVA_HOME, managementController.getJavaHome());
-        hostLevelParams.put(JAVA_VERSION, String.valueOf(configs.getJavaVersion()));
-        hostLevelParams.put(JDK_NAME, managementController.getJDKName());
-        hostLevelParams.put(JCE_NAME, managementController.getJCEName());
         hostLevelParams.put(STACK_NAME, stackId.getStackName());
         hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
         hostLevelParams.put(DB_NAME, managementController.getServerDB());

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java b/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
index e7a94d4..9409f70 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
@@ -17,6 +17,15 @@
  */
 package org.apache.ambari.server.utils;
 
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AMBARI_JAVA_HOME;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AMBARI_JAVA_VERSION;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AMBARI_JCE_NAME;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AMBARI_JDK_NAME;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JAVA_HOME;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JAVA_VERSION;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JCE_NAME;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JDK_NAME;
+
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.io.InputStream;
@@ -82,6 +91,7 @@ public class StageUtils {
   protected static final String PORTS = "all_ping_ports";
   protected static final String RACKS = "all_racks";
   protected static final String IPV4_ADDRESSES = "all_ipv4_ips";
+
   private static Map<String, String> componentToClusterInfoKeyMap =
     new HashMap<>();
   private static Map<String, String> decommissionedToClusterInfoKeyMap =
@@ -598,4 +608,48 @@ public class StageUtils {
         startOfRange + separator + endOfRange;
     return rangeItem;
   }
+
+  /**
+   * Add Ambari-specific JDK details to the command parameters.
+   */
+  public static void useAmbariJdkInCommandParams(Map<String, String> commandParams, Configuration configuration) {
+    if (StringUtils.isNotEmpty(configuration.getJavaHome()) && !configuration.getJavaHome().equals(configuration.getStackJavaHome())) {
+      commandParams.put(AMBARI_JAVA_HOME, configuration.getJavaHome());
+      commandParams.put(AMBARI_JAVA_VERSION, String.valueOf(configuration.getJavaVersion()));
+      if (StringUtils.isNotEmpty(configuration.getJDKName())) { // if not custom jdk
+        commandParams.put(AMBARI_JDK_NAME, configuration.getJDKName());
+      }
+      if (StringUtils.isNotEmpty(configuration.getJCEName())) { // if not custom jdk
+        commandParams.put(AMBARI_JCE_NAME, configuration.getJCEName());
+      }
+    }
+  }
+
+  /**
+   * Fill host-level parameters with JDK details, overriding them with the stack JDK data when a stack JAVA_HOME exists.
+   */
+  public static void useStackJdkIfExists(Map<String, String> hostLevelParams, Configuration configuration) {
+    // set defaults first
+    hostLevelParams.put(JAVA_HOME, configuration.getJavaHome());
+    hostLevelParams.put(JDK_NAME, configuration.getJDKName());
+    hostLevelParams.put(JCE_NAME, configuration.getJCEName());
+    hostLevelParams.put(JAVA_VERSION, String.valueOf(configuration.getJavaVersion()));
+    if (StringUtils.isNotEmpty(configuration.getStackJavaHome())
+      && !configuration.getStackJavaHome().equals(configuration.getJavaHome())) {
+      hostLevelParams.put(JAVA_HOME, configuration.getStackJavaHome());
+      if (StringUtils.isNotEmpty(configuration.getStackJavaVersion())) {
+        hostLevelParams.put(JAVA_VERSION, configuration.getStackJavaVersion());
+      }
+      if (StringUtils.isNotEmpty(configuration.getStackJDKName())) {
+        hostLevelParams.put(JDK_NAME, configuration.getStackJDKName());
+      } else {
+        hostLevelParams.put(JDK_NAME, null); // custom jdk for stack
+      }
+      if (StringUtils.isNotEmpty(configuration.getStackJCEName())) {
+        hostLevelParams.put(JCE_NAME, configuration.getStackJCEName());
+      } else {
+        hostLevelParams.put(JCE_NAME, null); // custom jdk for stack
+      }
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/python/ambari-server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari-server.py b/ambari-server/src/main/python/ambari-server.py
index c7bdcf9..5adcb04 100755
--- a/ambari-server/src/main/python/ambari-server.py
+++ b/ambari-server/src/main/python/ambari-server.py
@@ -482,6 +482,8 @@ def init_setup_parser_options(parser):
 
   other_group.add_option('-j', '--java-home', default=None,
                          help="Use specified java_home.  Must be valid on all hosts")
+  other_group.add_option('--stack-java-home', dest="stack_java_home", default=None,
+                    help="Use specified java_home for stack services.  Must be valid on all hosts")
   other_group.add_option('--skip-view-extraction', action="store_true", default=False, help="Skip extraction of system views", dest="skip_view_extraction")
   other_group.add_option('--postgresschema', default=None, help="Postgres database schema name",
                          dest="postgres_schema")

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/python/ambari_server/serverConfiguration.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/serverConfiguration.py b/ambari-server/src/main/python/ambari_server/serverConfiguration.py
index 4780338..631c9a7 100644
--- a/ambari-server/src/main/python/ambari_server/serverConfiguration.py
+++ b/ambari-server/src/main/python/ambari_server/serverConfiguration.py
@@ -87,6 +87,12 @@ JCE_NAME_PROPERTY = "jce.name"
 JDK_DOWNLOAD_SUPPORTED_PROPERTY = "jdk.download.supported"
 JCE_DOWNLOAD_SUPPORTED_PROPERTY = "jce.download.supported"
 
+# Stack JDK
+STACK_JAVA_HOME_PROPERTY = "stack.java.home"
+STACK_JDK_NAME_PROPERTY = "stack.jdk.name"
+STACK_JCE_NAME_PROPERTY = "stack.jce.name"
+STACK_JAVA_VERSION = "stack.java.version"
+
 
 #TODO property used incorrectly in local case, it was meant to be dbms name, not postgres database name,
 # has workaround for now, as we don't need dbms name if persistence_type=local

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/python/ambari_server/serverSetup.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/serverSetup.py b/ambari-server/src/main/python/ambari_server/serverSetup.py
index c6de088..5c016c5 100644
--- a/ambari-server/src/main/python/ambari_server/serverSetup.py
+++ b/ambari-server/src/main/python/ambari_server/serverSetup.py
@@ -41,7 +41,8 @@ from ambari_server.serverConfiguration import configDefaults, JDKRelease, \
   get_resources_location, get_value_from_properties, read_ambari_user, update_properties, validate_jdk, write_property, \
   JAVA_HOME, JAVA_HOME_PROPERTY, JCE_NAME_PROPERTY, JDBC_RCA_URL_PROPERTY, JDBC_URL_PROPERTY, \
   JDK_NAME_PROPERTY, JDK_RELEASES, NR_USER_PROPERTY, OS_FAMILY, OS_FAMILY_PROPERTY, OS_TYPE, OS_TYPE_PROPERTY, OS_VERSION, \
-  VIEWS_DIR_PROPERTY, JDBC_DATABASE_PROPERTY, JDK_DOWNLOAD_SUPPORTED_PROPERTY, JCE_DOWNLOAD_SUPPORTED_PROPERTY, SETUP_DONE_PROPERTIES
+  VIEWS_DIR_PROPERTY, JDBC_DATABASE_PROPERTY, JDK_DOWNLOAD_SUPPORTED_PROPERTY, JCE_DOWNLOAD_SUPPORTED_PROPERTY, SETUP_DONE_PROPERTIES, \
+  STACK_JAVA_HOME_PROPERTY, STACK_JDK_NAME_PROPERTY, STACK_JCE_NAME_PROPERTY, STACK_JAVA_VERSION
 from ambari_server.serverUtils import is_server_runing
 from ambari_server.setupSecurity import adjust_directory_permissions
 from ambari_server.userInput import get_YN_input, get_validated_string_input
@@ -79,7 +80,7 @@ UNTAR_JDK_ARCHIVE = "tar --no-same-owner -xvf {0}"
 JDK_PROMPT = "[{0}] {1}\n"
 JDK_VALID_CHOICES = "^[{0}{1:d}]$"
 
-
+JDK_VERSION_CHECK_CMD = """{0} -version 2>&1 | grep -i version | sed 's/.*version ".*\.\(.*\)\..*"/\\1/; 1q' 2>&1"""
 
 def get_supported_jdbc_drivers():
   factory = DBMSConfigFactory()
@@ -409,7 +410,7 @@ class JDKSetup(object):
   #
   # Downloads and installs the JDK and the JCE policy archive
   #
-  def download_and_install_jdk(self, args, properties):
+  def download_and_install_jdk(self, args, properties, ambariOnly = False):
     conf_file = properties.fileName
 
     jcePolicyWarn = "JCE Policy files are required for configuring Kerberos security. If you plan to use Kerberos," \
@@ -429,8 +430,22 @@ class JDKSetup(object):
       properties.removeOldProp(JDK_NAME_PROPERTY)
       properties.removeOldProp(JCE_NAME_PROPERTY)
 
+      if not ambariOnly:
+        properties.process_pair(STACK_JAVA_HOME_PROPERTY, args.java_home)
+        properties.removeOldProp(STACK_JDK_NAME_PROPERTY)
+        properties.removeOldProp(STACK_JCE_NAME_PROPERTY)
+
       self._ensure_java_home_env_var_is_set(args.java_home)
       self.jdk_index = self.custom_jdk_number
+
+      if args.stack_java_home: # reset stack specific jdk properties if stack_java_home exists
+        print 'Setting JAVA_HOME for stack services...'
+        print_warning_msg("JAVA_HOME " + args.stack_java_home + " (Stack) must be valid on ALL hosts")
+        print_warning_msg(jcePolicyWarn)
+        properties.process_pair(STACK_JAVA_HOME_PROPERTY, args.stack_java_home)
+        properties.removeOldProp(STACK_JDK_NAME_PROPERTY)
+        properties.removeOldProp(STACK_JCE_NAME_PROPERTY)
+
       return
 
     java_home_var = get_JAVA_HOME()
@@ -440,7 +455,10 @@ class JDKSetup(object):
       progress_func = download_progress
 
     if java_home_var:
-      change_jdk = get_YN_input("Do you want to change Oracle JDK [y/n] (n)? ", False)
+      message = "Do you want to change Oracle JDK [y/n] (n)? "
+      if ambariOnly:
+        message = "Do you want to change Oracle JDK for Ambari Server [y/n] (n)? "
+      change_jdk = get_YN_input(message, False)
       if not change_jdk:
         self._ensure_java_home_env_var_is_set(java_home_var)
         self.jdk_index = self.custom_jdk_number
@@ -448,7 +466,7 @@ class JDKSetup(object):
 
     #Continue with the normal setup, taking the first listed JDK version as the default option
     jdk_num = str(self.jdk_index + 1)
-    (self.jdks, jdk_choice_prompt, jdk_valid_choices, self.custom_jdk_number) = self._populate_jdk_configs(properties, jdk_num)
+    (self.jdks, jdk_choice_prompt, jdk_valid_choices, self.custom_jdk_number) = self._populate_jdk_configs(properties, jdk_num, ambariOnly)
 
     jdk_num = get_validated_string_input(
       jdk_choice_prompt,
@@ -478,10 +496,18 @@ class JDKSetup(object):
       properties.removeOldProp(JDK_NAME_PROPERTY)
       properties.removeOldProp(JCE_NAME_PROPERTY)
 
+      if not ambariOnly:
+        properties.process_pair(STACK_JAVA_HOME_PROPERTY, args.java_home)
+        properties.removeOldProp(STACK_JDK_NAME_PROPERTY)
+        properties.removeOldProp(STACK_JCE_NAME_PROPERTY)
+
       # Make sure any previously existing JDK and JCE name properties are removed. These will
       # confuse things in a Custom JDK scenario
       properties.removeProp(JDK_NAME_PROPERTY)
       properties.removeProp(JCE_NAME_PROPERTY)
+      if not ambariOnly:
+        properties.removeOldProp(STACK_JDK_NAME_PROPERTY)
+        properties.removeOldProp(STACK_JCE_NAME_PROPERTY)
 
       self._ensure_java_home_env_var_is_set(args.java_home)
       return
@@ -551,10 +577,13 @@ class JDKSetup(object):
 
     properties.process_pair(JDK_NAME_PROPERTY, jdk_cfg.dest_file)
     properties.process_pair(JAVA_HOME_PROPERTY, java_home_dir)
+    if not ambariOnly:
+      properties.process_pair(STACK_JDK_NAME_PROPERTY, jdk_cfg.dest_file)
+      properties.process_pair(STACK_JAVA_HOME_PROPERTY, java_home_dir)
 
     self._ensure_java_home_env_var_is_set(java_home_dir)
 
-  def download_and_unpack_jce_policy(self, properties):
+  def download_and_unpack_jce_policy(self, properties, ambariOnly = False):
     err_msg_stdout = "JCE Policy files are required for secure HDP setup. Please ensure " \
               " all hosts have the JCE unlimited strength policy 6, files."
 
@@ -563,7 +592,7 @@ class JDKSetup(object):
     jdk_cfg = self.jdks[self.jdk_index]
 
     try:
-      JDKSetup._download_jce_policy(jdk_cfg.jcpol_url, jdk_cfg.dest_jcpol_file, resources_dir, properties)
+      JDKSetup._download_jce_policy(jdk_cfg.jcpol_url, jdk_cfg.dest_jcpol_file, resources_dir, properties, ambariOnly)
     except FatalException, e:
       print err_msg_stdout
       print_error_msg("Failed to download JCE policy files:")
@@ -590,10 +619,22 @@ class JDKSetup(object):
     jce_zip_path = os.path.abspath(os.path.join(resources_dir, jce_packed_file))
     expand_jce_zip_file(jce_zip_path, jdk_security_path)
 
-  def _populate_jdk_configs(self, properties, jdk_num):
+  def _populate_jdk_configs(self, properties, jdk_num, ambariOnly = False):
+    def remove_jdk_condition(name):
+      """
+      Removes jdk1.7 from the default choices.
+      This method can be removed if JDK 7 support (for stack services) will be dropped.
+      """
+      if name != "jdk1.7":
+        return True
+      else:
+       print "JDK 7 detected. Removed from choices."
+       return False
     if properties.has_key(JDK_RELEASES):
       jdk_names = properties[JDK_RELEASES].split(',')
       jdk_names = filter(None, jdk_names)
+      if ambariOnly:
+        jdk_names = filter(lambda x : remove_jdk_condition(x), jdk_names)
       jdks = []
       for jdk_name in jdk_names:
         jdkR = JDKRelease.from_properties(properties, jdk_name)
@@ -630,7 +671,7 @@ class JDKSetup(object):
       raise FatalException(1, err)
 
   @staticmethod
-  def _download_jce_policy(jcpol_url, dest_jcpol_file, resources_dir, properties):
+  def _download_jce_policy(jcpol_url, dest_jcpol_file, resources_dir, properties, ambariOnly = False):
     dest_file = os.path.abspath(os.path.join(resources_dir, dest_jcpol_file))
 
     if not os.path.exists(dest_file):
@@ -653,6 +694,8 @@ class JDKSetup(object):
       print "JCE Policy archive already exists, using " + dest_file
 
     properties.process_pair(JCE_NAME_PROPERTY, dest_jcpol_file)
+    if not ambariOnly:
+      properties.process_pair(STACK_JCE_NAME_PROPERTY, dest_jcpol_file)
 
   # Base implementation, overriden in the subclasses
   def _install_jdk(self, java_inst_file, java_home_dir):
@@ -828,6 +871,14 @@ def download_and_install_jdk(options):
 
   update_properties(properties)
 
+  ambari_java_version_valid = check_ambari_java_version_is_valid(get_JAVA_HOME(), jdkSetup.JAVA_BIN, 8, properties)
+  if not ambari_java_version_valid:
+    jdkSetup = JDKSetup() # recreate object
+    jdkSetup.download_and_install_jdk(options, properties, True)
+    if jdkSetup.jdk_index != jdkSetup.custom_jdk_number:
+      jdkSetup.download_and_unpack_jce_policy(properties, True)
+    update_properties(properties)
+
   return 0
 
 
@@ -1200,7 +1251,43 @@ def setup_jce_policy(args):
   print 'NOTE: Restart Ambari Server to apply changes' + \
         ' ("ambari-server restart|stop|start")'
 
+def check_ambari_java_version_is_valid(java_home, java_bin, min_version, properties):
+  """
+  Check that ambari uses the proper (minimum) JDK with a shell command.
+  Returns true, if Ambari meets with the minimal JDK version requirement.
+  """
+  result = True
+  print 'Check JDK version for Ambari Server...'
+  try:
+    command = JDK_VERSION_CHECK_CMD.format(os.path.join(java_home, 'bin', java_bin))
+    process = subprocess.Popen(command,
+                               stdout=subprocess.PIPE,
+                               stdin=subprocess.PIPE,
+                               stderr=subprocess.PIPE,
+                               shell=True
+                               )
+    (out, err) = process.communicate()
+    if process.returncode != 0:
+      err = "Checking JDK version command returned with exit code %s" % process.returncode
+      raise FatalException(process.returncode, err)
+    else:
+      actual_jdk_version = int(out)
+      print 'JDK version found: {0}'.format(actual_jdk_version)
+      if actual_jdk_version < min_version:
+        print 'Minimum JDK version is {0} for Ambari. Setup JDK again only for Ambari Server.'.format(min_version)
+        properties.process_pair(STACK_JAVA_VERSION, out)
+        result = False
+      else:
+        print 'Minimum JDK version is {0} for Ambari. Skipping to setup different JDK for Ambari Server.'.format(min_version)
+
+  except FatalException as e:
+    err = 'Running java version check command failed: {0}. Exiting.'.format(e)
+    raise FatalException(e.code, err)
+  except Exception as e:
+    err = 'Running java version check command failed: {0}. Exiting.'.format(e)
+    raise FatalException(1, err)
 
+  return result
 #
 # Resets the Ambari Server.
 #

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/params.py
index 6eb3ba8..5f547f3 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/params.py
@@ -60,7 +60,9 @@ user_group = config['configurations']['cluster-env']['user_group']
 fetch_nonlocal_groups = config['configurations']['cluster-env']["fetch_nonlocal_groups"]
 
 # shared configs
-java64_home = config['hostLevelParams']['java_home']
+java_home = config['hostLevelParams']['java_home']
+ambari_java_home = default("/commandParams/ambari_java_home", None)
+java64_home = ambari_java_home if ambari_java_home is not None else java_home
 java_exec = format("{java64_home}/bin/java")
 zookeeper_hosts_list = config['clusterHostInfo']['zookeeper_hosts']
 zookeeper_hosts_list.sort()

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
index b8c14f4..486f568 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
@@ -203,9 +203,15 @@ security_enabled = False if not is_hbase_distributed else config['configurations
 # this is "hadoop-metrics.properties" for 1.x stacks
 metric_prop_file_name = "hadoop-metrics2-hbase.properties"
 
+java_home = config['hostLevelParams']['java_home']
+ambari_java_home = default("/commandParams/ambari_java_home", None)
 # not supporting 32 bit jdk.
-java64_home = config['hostLevelParams']['java_home']
-java_version = expect("/hostLevelParams/java_version", int)
+java64_home = ambari_java_home if ambari_java_home is not None else java_home
+ambari_java_version = default("/commandParams/ambari_java_version", None)
+if ambari_java_version:
+  java_version = expect("/commandParams/ambari_java_version", int)
+else :
+  java_version = expect("/hostLevelParams/java_version", int)
 
 metrics_collector_heapsize = default('/configurations/ams-env/metrics_collector_heapsize', "512")
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata.py b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata.py
index 36c4598..d424f5b 100644
--- a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata.py
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata.py
@@ -207,7 +207,7 @@ def upload_conf_set(config_set, jaasFile):
       config_set_dir=format("{conf_dir}/solr"),
       config_set=config_set,
       tmp_dir=params.tmp_dir,
-      java64_home=params.java64_home,
+      java64_home=params.ambari_java_home,
       solrconfig_content=InlineTemplate(params.metadata_solrconfig_content),
       jaas_file=jaasFile,
       retry=30, interval=5)
@@ -220,7 +220,7 @@ def create_collection(collection, config_set, jaasFile):
       solr_znode=params.infra_solr_znode,
       collection = collection,
       config_set=config_set,
-      java64_home=params.java64_home,
+      java64_home=params.ambari_java_home,
       jaas_file=jaasFile,
       shards=params.atlas_solr_shards,
       replication_factor = params.infra_solr_replication_factor)
@@ -230,7 +230,7 @@ def secure_znode(znode, jaasFile):
   solr_cloud_util.secure_znode(config=params.config, zookeeper_quorum=params.zookeeper_quorum,
                                solr_znode=znode,
                                jaas_file=jaasFile,
-                               java64_home=params.java64_home, sasl_users=[params.atlas_jaas_principal])
+                               java64_home=params.ambari_java_home, sasl_users=[params.atlas_jaas_principal])
 
 
 
@@ -240,4 +240,4 @@ def check_znode():
   solr_cloud_util.check_znode(
     zookeeper_quorum=params.zookeeper_quorum,
     solr_znode=params.infra_solr_znode,
-    java64_home=params.java64_home)
+    java64_home=params.ambari_java_home)

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
index d26df33..111a248 100644
--- a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
@@ -126,6 +126,7 @@ user_group = config['configurations']['cluster-env']['user_group']
 
 # metadata env
 java64_home = config['hostLevelParams']['java_home']
+ambari_java_home = default("/commandParams/ambari_java_home", java64_home)
 java_exec = format("{java64_home}/bin/java")
 env_sh_template = config['configurations']['atlas-env']['content']
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py
index 662f49e..1b77999 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py
@@ -74,7 +74,9 @@ logfeeder_pid_file = status_params.logfeeder_pid_file
 user_group = config['configurations']['cluster-env']['user_group']
 
 # shared configs
-java64_home = config['hostLevelParams']['java_home']
+java_home = config['hostLevelParams']['java_home']
+ambari_java_home = default("/commandParams/ambari_java_home", None)
+java64_home = ambari_java_home if ambari_java_home is not None else java_home
 cluster_name = str(config['clusterName'])
 
 configurations = config['configurations'] # need reference inside logfeeder jinja templates

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py
index 3789358..6d5581d 100644
--- a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py
@@ -123,6 +123,7 @@ if stack_supports_ranger_tagsync:
 usersync_services_file = format('{stack_root}/current/ranger-usersync/ranger-usersync-services.sh')
 
 java_home = config['hostLevelParams']['java_home']
+ambari_java_home = default("/commandParams/ambari_java_home", java_home)
 unix_user  = config['configurations']['ranger-env']['ranger_user']
 unix_group = config['configurations']['ranger-env']['ranger_group']
 ranger_pid_dir = default("/configurations/ranger-env/ranger_pid_dir", "/var/run/ranger")

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger_xml.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger_xml.py b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger_xml.py
index ba21494..4bcf9b0 100644
--- a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger_xml.py
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger_xml.py
@@ -711,7 +711,7 @@ def setup_ranger_audit_solr():
         config_set = params.ranger_solr_config_set,
         config_set_dir = params.ranger_solr_conf,
         tmp_dir = params.tmp_dir,
-        java64_home = params.java_home,
+        java64_home = params.ambari_java_home,
         solrconfig_content = InlineTemplate(params.ranger_solr_config_content),
         jaas_file=params.solr_jaas_file,
         retry=30, interval=5
@@ -725,7 +725,7 @@ def setup_ranger_audit_solr():
         config_set = params.ranger_solr_config_set,
         config_set_dir = params.ranger_solr_conf,
         tmp_dir = params.tmp_dir,
-        java64_home = params.java_home,
+        java64_home = params.ambari_java_home,
         jaas_file=params.solr_jaas_file,
         retry=30, interval=5)
 
@@ -748,7 +748,7 @@ def setup_ranger_audit_solr():
       solr_znode = params.solr_znode,
       collection = params.ranger_solr_collection_name,
       config_set = params.ranger_solr_config_set,
-      java64_home = params.java_home,
+      java64_home = params.ambari_java_home,
       shards = params.ranger_solr_shards,
       replication_factor = int(params.replication_factor),
       jaas_file = params.solr_jaas_file)
@@ -774,14 +774,14 @@ def check_znode():
   solr_cloud_util.check_znode(
     zookeeper_quorum=params.zookeeper_quorum,
     solr_znode=params.solr_znode,
-    java64_home=params.java_home)
+    java64_home=params.ambari_java_home)
 
 def secure_znode(znode, jaasFile):
   import params
   solr_cloud_util.secure_znode(config=params.config, zookeeper_quorum=params.zookeeper_quorum,
                                solr_znode=znode,
                                jaas_file=jaasFile,
-                               java64_home=params.java_home, sasl_users=[params.ranger_admin_jaas_principal])
+                               java64_home=params.ambari_java_home, sasl_users=[params.ranger_admin_jaas_principal])
 
 def get_ranger_plugin_principals(services_defaults_tuple_list):
   """

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
index 4052d1d..1d79efb 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
@@ -65,6 +65,8 @@ version = default("/commandParams/version", None)
 # Handle upgrade and downgrade
 if (upgrade_type is not None) and version:
   stack_version_formatted = format_stack_version(version)
+ambari_java_home = default("/commandParams/ambari_java_home", None)
+ambari_jdk_name = default("/commandParams/ambari_jdk_name", None)
 
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
index 1f17cd1..39f5a47 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
@@ -176,17 +176,26 @@ def setup_hadoop_env():
 
 def setup_java():
   """
-  Installs jdk using specific params, that comes from ambari-server
+  Install jdk using specific params.
+  Install ambari jdk as well if the stack and ambari jdk are different.
   """
   import params
+  __setup_java(custom_java_home=params.java_home, custom_jdk_name=params.jdk_name)
+  if params.ambari_java_home and params.ambari_java_home != params.java_home:
+    __setup_java(custom_java_home=params.ambari_java_home, custom_jdk_name=params.ambari_jdk_name)
 
-  java_exec = format("{java_home}/bin/java")
+def __setup_java(custom_java_home, custom_jdk_name):
+  """
+  Installs jdk using specific params, that comes from ambari-server
+  """
+  import params
+  java_exec = format("{custom_java_home}/bin/java")
 
   if not os.path.isfile(java_exec):
     if not params.jdk_name: # if custom jdk is used.
       raise Fail(format("Unable to access {java_exec}. Confirm you have copied jdk to this host."))
 
-    jdk_curl_target = format("{tmp_dir}/{jdk_name}")
+    jdk_curl_target = format("{tmp_dir}/{custom_jdk_name}")
     java_dir = os.path.dirname(params.java_home)
 
     Directory(params.artifact_dir,
@@ -194,13 +203,13 @@ def setup_java():
               )
 
     File(jdk_curl_target,
-         content = DownloadSource(format("{jdk_location}/{jdk_name}")),
+         content = DownloadSource(format("{jdk_location}/{custom_jdk_name}")),
          not_if = format("test -f {jdk_curl_target}")
-    )
+         )
 
     File(jdk_curl_target,
          mode = 0755,
-    )
+         )
 
     tmp_java_dir = tempfile.mkdtemp(prefix="jdk_tmp_", dir=params.tmp_dir)
 
@@ -213,7 +222,7 @@ def setup_java():
         install_cmd = format("cd {tmp_java_dir} && tar -xf {jdk_curl_target} && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
 
       Directory(java_dir
-      )
+                )
 
       Execute(chmod_cmd,
               sudo = True,
@@ -225,10 +234,11 @@ def setup_java():
     finally:
       Directory(tmp_java_dir, action="delete")
 
-    File(format("{java_home}/bin/java"),
+    File(format("{custom_java_home}/bin/java"),
          mode=0755,
          cd_access="a",
          )
     Execute(('chmod', '-R', '755', params.java_home),
-      sudo = True,
-    )
+            sudo = True,
+            )
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
index 3488e75..49b0063 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
@@ -259,6 +259,10 @@ refresh_topology = False
 command_params = config["commandParams"] if "commandParams" in config else None
 if command_params is not None:
   refresh_topology = bool(command_params["refresh_topology"]) if "refresh_topology" in command_params else False
+
+ambari_java_home = default("/commandParams/ambari_java_home", None)
+ambari_jdk_name = default("/commandParams/ambari_jdk_name", None)
+ambari_jce_name = default("/commandParams/ambari_jce_name", None)
   
 ambari_libs_dir = "/var/lib/ambari-agent/lib"
 is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled']

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
index 148d235..42785ba 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
@@ -197,9 +197,17 @@ def create_microsoft_r_dir():
     except Exception as exception:
       Logger.warning("Could not check the existence of {0} on DFS while starting {1}, exception: {2}".format(directory, params.current_service, str(exception)))
 
-
 def setup_unlimited_key_jce_policy():
   """
+  Sets up the unlimited key JCE policy if needed. (sets up ambari JCE as well if ambari and the stack use different JDK)
+  """
+  import params
+  __setup_unlimited_key_jce_policy(custom_java_home=params.java_home, custom_jdk_name=params.jdk_name, custom_jce_name = params.jce_policy_zip)
+  if params.ambari_jce_name and params.ambari_jce_name != params.jce_policy_zip:
+    __setup_unlimited_key_jce_policy(custom_java_home=params.ambari_java_home, custom_jdk_name=params.ambari_jdk_name, custom_jce_name = params.ambari_jce_name)
+
+def __setup_unlimited_key_jce_policy(custom_java_home, custom_jdk_name, custom_jce_name):
+  """
   Sets up the unlimited key JCE policy if needed.
 
   The following criteria must be met:
@@ -223,27 +231,27 @@ def setup_unlimited_key_jce_policy():
   if params.sysprep_skip_setup_jce:
     Logger.info("Skipping unlimited key JCE policy check and setup since the host is sys prepped")
 
-  elif not params.jdk_name:
+  elif not custom_jdk_name:
     Logger.debug("Skipping unlimited key JCE policy check and setup since the Java VM is not managed by Ambari")
 
   elif not params.unlimited_key_jce_required:
     Logger.debug("Skipping unlimited key JCE policy check and setup since it is not required")
 
   else:
-    jcePolicyInfo = JcePolicyInfo(params.java_home)
+    jcePolicyInfo = JcePolicyInfo(custom_java_home)
 
     if jcePolicyInfo.is_unlimited_key_jce_policy():
       Logger.info("The unlimited key JCE policy is required, and appears to have been installed.")
 
-    elif params.jce_policy_zip is None:
+    elif custom_jce_name is None:
       raise Fail("The unlimited key JCE policy needs to be installed; however the JCE policy zip is not specified.")
 
     else:
       Logger.info("The unlimited key JCE policy is required, and needs to be installed.")
 
-      jce_zip_target = format("{artifact_dir}/{jce_policy_zip}")
-      jce_zip_source = format("{ambari_server_resources_url}/{jce_policy_zip}")
-      java_security_dir = format("{java_home}/jre/lib/security")
+      jce_zip_target = format("{artifact_dir}/{custom_jce_name}")
+      jce_zip_source = format("{ambari_server_resources_url}/{custom_jce_name}")
+      java_security_dir = format("{custom_java_home}/jre/lib/security")
 
       Logger.debug("Downloading the unlimited key JCE policy files from {0} to {1}.".format(jce_zip_source, jce_zip_target))
       Directory(params.artifact_dir, create_parents=True)

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py
index 9be9101..eb5feae 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py
@@ -65,6 +65,9 @@ version = default("/commandParams/version", None)
 if (upgrade_type is not None) and version:
   stack_version_formatted = format_stack_version(version)
 
+ambari_java_home = default("/commandParams/ambari_java_home", None)
+ambari_jdk_name = default("/commandParams/ambari_jdk_name", None)
+
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py
index 5d79084..dbd1727 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py
@@ -172,17 +172,26 @@ def setup_hadoop_env():
 
 def setup_java():
   """
-  Installs jdk using specific params, that comes from ambari-server
+  Install jdk using specific params.
+  Install ambari jdk as well if the stack and ambari jdk are different.
   """
   import params
+  __setup_java(custom_java_home=params.java_home, custom_jdk_name=params.jdk_name)
+  if params.ambari_java_home and params.ambari_java_home != params.java_home:
+    __setup_java(custom_java_home=params.ambari_java_home, custom_jdk_name=params.ambari_jdk_name)
 
-  java_exec = format("{java_home}/bin/java")
+def __setup_java(custom_java_home, custom_jdk_name):
+  """
+  Installs jdk using specific params, that comes from ambari-server
+  """
+  import params
+  java_exec = format("{custom_java_home}/bin/java")
 
   if not os.path.isfile(java_exec):
     if not params.jdk_name: # if custom jdk is used.
       raise Fail(format("Unable to access {java_exec}. Confirm you have copied jdk to this host."))
 
-    jdk_curl_target = format("{tmp_dir}/{jdk_name}")
+    jdk_curl_target = format("{tmp_dir}/{custom_jdk_name}")
     java_dir = os.path.dirname(params.java_home)
 
     Directory(params.artifact_dir,
@@ -190,9 +199,13 @@ def setup_java():
               )
 
     File(jdk_curl_target,
-         content = DownloadSource(format("{jdk_location}/{jdk_name}")),
+         content = DownloadSource(format("{jdk_location}/{custom_jdk_name}")),
          not_if = format("test -f {jdk_curl_target}")
-    )
+         )
+
+    File(jdk_curl_target,
+         mode = 0755,
+         )
 
     tmp_java_dir = tempfile.mkdtemp(prefix="jdk_tmp_", dir=params.tmp_dir)
 
@@ -205,7 +218,7 @@ def setup_java():
         install_cmd = format("cd {tmp_java_dir} && tar -xf {jdk_curl_target} && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
 
       Directory(java_dir
-      )
+                )
 
       Execute(chmod_cmd,
               sudo = True,
@@ -217,10 +230,10 @@ def setup_java():
     finally:
       Directory(tmp_java_dir, action="delete")
 
-    File(format("{java_home}/bin/java"),
+    File(format("{custom_java_home}/bin/java"),
          mode=0755,
          cd_access="a",
          )
     Execute(('chmod', '-R', '755', params.java_home),
-      sudo = True,
-    )
+            sudo = True,
+            )

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
index a3830f7..a0259af 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
@@ -243,6 +243,10 @@ refresh_topology = False
 command_params = config["commandParams"] if "commandParams" in config else None
 if command_params is not None:
   refresh_topology = bool(command_params["refresh_topology"]) if "refresh_topology" in command_params else False
+
+ambari_java_home = default("/commandParams/ambari_java_home", None)
+ambari_jdk_name = default("/commandParams/ambari_jdk_name", None)
+ambari_jce_name = default("/commandParams/ambari_jce_name", None)
   
 ambari_libs_dir = "/var/lib/ambari-agent/lib"
 is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled']

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py
index aed1124..5156dd4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py
@@ -190,9 +190,17 @@ def create_microsoft_r_dir():
     except Exception as exception:
       Logger.warning("Could not check the existence of {0} on DFS while starting {1}, exception: {2}".format(directory, params.current_service, str(exception)))
 
-
 def setup_unlimited_key_jce_policy():
   """
+  Sets up the unlimited key JCE policy if needed. (sets up ambari JCE as well if ambari and the stack use different JDK)
+  """
+  import params
+  __setup_unlimited_key_jce_policy(custom_java_home=params.java_home, custom_jdk_name=params.jdk_name, custom_jce_name = params.jce_policy_zip)
+  if params.ambari_jce_name and params.ambari_jce_name != params.jce_policy_zip:
+    __setup_unlimited_key_jce_policy(custom_java_home=params.ambari_java_home, custom_jdk_name=params.ambari_jdk_name, custom_jce_name = params.ambari_jce_name)
+
+def __setup_unlimited_key_jce_policy(custom_java_home, custom_jdk_name, custom_jce_name):
+  """
   Sets up the unlimited key JCE policy if needed.
 
   The following criteria must be met:
@@ -216,27 +224,27 @@ def setup_unlimited_key_jce_policy():
   if params.sysprep_skip_setup_jce:
     Logger.info("Skipping unlimited key JCE policy check and setup since the host is sys prepped")
 
-  elif not params.jdk_name:
+  elif not custom_jdk_name:
     Logger.debug("Skipping unlimited key JCE policy check and setup since the Java VM is not managed by Ambari")
 
   elif not params.unlimited_key_jce_required:
     Logger.debug("Skipping unlimited key JCE policy check and setup since it is not required")
 
   else:
-    jcePolicyInfo = JcePolicyInfo(params.java_home)
+    jcePolicyInfo = JcePolicyInfo(custom_java_home)
 
     if jcePolicyInfo.is_unlimited_key_jce_policy():
       Logger.info("The unlimited key JCE policy is required, and appears to have been installed.")
 
-    elif params.jce_policy_zip is None:
+    elif custom_jce_name is None:
       raise Fail("The unlimited key JCE policy needs to be installed; however the JCE policy zip is not specified.")
 
     else:
       Logger.info("The unlimited key JCE policy is required, and needs to be installed.")
 
-      jce_zip_target = format("{artifact_dir}/{jce_policy_zip}")
-      jce_zip_source = format("{ambari_server_resources_url}/{jce_policy_zip}")
-      java_security_dir = format("{java_home}/jre/lib/security")
+      jce_zip_target = format("{artifact_dir}/{custom_jce_name}")
+      jce_zip_source = format("{ambari_server_resources_url}/{custom_jce_name}")
+      java_security_dir = format("{custom_java_home}/jre/lib/security")
 
       Logger.debug("Downloading the unlimited key JCE policy files from {0} to {1}.".format(jce_zip_source, jce_zip_target))
       Directory(params.artifact_dir, create_parents=True)

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/stacks/HDP/3.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/metainfo.xml
index 0364d41..9af03c0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/metainfo.xml
@@ -19,6 +19,6 @@
   <versions>
     <active>true</active>
   </versions>
-  <minJdk>1.7</minJdk>
+  <minJdk>1.8</minJdk>
   <maxJdk>1.8</maxJdk>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-ANY/scripts/params.py
index 2c2c901..e0e78b9 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-ANY/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-ANY/scripts/params.py
@@ -39,6 +39,9 @@ artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
 jdk_location = config['hostLevelParams']['jdk_location']
 java_version = expect("/hostLevelParams/java_version", int)
 
+ambari_java_home = default("/commandParams/ambari_java_home", None)
+ambari_jdk_name = default("/commandParams/ambari_jdk_name", None)
+
 service_name = config["serviceName"]
 component_name = config["role"]
 sudo = AMBARI_SUDO_BINARY
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-ANY/scripts/shared_initialization.py
index 7dc1a48..0aae910 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-ANY/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-ANY/scripts/shared_initialization.py
@@ -40,17 +40,26 @@ def setup_users():
 
 def setup_java():
   """
-  Installs jdk using specific params, that comes from ambari-server
+  Install jdk using specific params.
+  Install ambari jdk as well if the stack and ambari jdk are different.
   """
   import params
+  __setup_java(custom_java_home=params.java_home, custom_jdk_name=params.jdk_name)
+  if params.ambari_java_home and params.ambari_java_home != params.java_home:
+    __setup_java(custom_java_home=params.ambari_java_home, custom_jdk_name=params.ambari_jdk_name)
 
-  java_exec = format("{java_home}/bin/java")
+def __setup_java(custom_java_home, custom_jdk_name):
+  """
+  Installs jdk using specific params, that comes from ambari-server
+  """
+  import params
+  java_exec = format("{custom_java_home}/bin/java")
 
   if not os.path.isfile(java_exec):
     if not params.jdk_name: # if custom jdk is used.
       raise Fail(format("Unable to access {java_exec}. Confirm you have copied jdk to this host."))
 
-    jdk_curl_target = format("{tmp_dir}/{jdk_name}")
+    jdk_curl_target = format("{tmp_dir}/{custom_jdk_name}")
     java_dir = os.path.dirname(params.java_home)
 
     Directory(params.artifact_dir,
@@ -58,10 +67,14 @@ def setup_java():
               )
 
     File(jdk_curl_target,
-         content = DownloadSource(format("{jdk_location}/{jdk_name}")),
+         content = DownloadSource(format("{jdk_location}/{custom_jdk_name}")),
          not_if = format("test -f {jdk_curl_target}")
          )
 
+    File(jdk_curl_target,
+         mode = 0755,
+         )
+
     tmp_java_dir = tempfile.mkdtemp(prefix="jdk_tmp_", dir=params.tmp_dir)
 
     try:
@@ -85,7 +98,7 @@ def setup_java():
     finally:
       Directory(tmp_java_dir, action="delete")
 
-    File(format("{java_home}/bin/java"),
+    File(format("{custom_java_home}/bin/java"),
          mode=0755,
          cd_access="a",
          )

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProviderTest.java
index 29b0476..1f906ad 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProviderTest.java
@@ -271,7 +271,11 @@ public class ClientConfigResourceProviderTest {
     expect(configMap.get(Configuration.AMBARI_PYTHON_WRAP.getKey())).andReturn(Configuration.AMBARI_PYTHON_WRAP.getDefaultValue());
     expect(configuration.getConfigsMap()).andReturn(returnConfigMap);
     expect(configuration.getResourceDirPath()).andReturn(stackRoot);
+    expect(configuration.getJavaHome()).andReturn("dummy_java_home");
+    expect(configuration.getJDKName()).andReturn(null);
+    expect(configuration.getJCEName()).andReturn(null);
     expect(configuration.getJavaVersion()).andReturn(8);
+    expect(configuration.getStackJavaHome()).andReturn(null);
     expect(configuration.areHostsSysPrepped()).andReturn("false");
     expect(configuration.isAgentStackRetryOnInstallEnabled()).andReturn("false");
     expect(configuration.getAgentStackRetryOnInstallCount()).andReturn("5");
@@ -524,7 +528,11 @@ public class ClientConfigResourceProviderTest {
     expect(configMap.get(Configuration.AMBARI_PYTHON_WRAP.getKey())).andReturn(Configuration.AMBARI_PYTHON_WRAP.getDefaultValue());
     expect(configuration.getConfigsMap()).andReturn(returnConfigMap);
     expect(configuration.getResourceDirPath()).andReturn("/var/lib/ambari-server/src/main/resources");
+    expect(configuration.getJavaHome()).andReturn("dummy_java_home");
+    expect(configuration.getJDKName()).andReturn(null);
+    expect(configuration.getJCEName()).andReturn(null);
     expect(configuration.getJavaVersion()).andReturn(8);
+    expect(configuration.getStackJavaHome()).andReturn(null);
     expect(configuration.areHostsSysPrepped()).andReturn("false");
     expect(configuration.isAgentStackRetryOnInstallEnabled()).andReturn("false");
     expect(configuration.getAgentStackRetryOnInstallCount()).andReturn("5");

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java b/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
index b1cce55..c3b820b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
@@ -22,6 +22,7 @@ import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.expectLastCall;
 import static org.easymock.EasyMock.getCurrentArguments;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.ByteArrayInputStream;
@@ -615,6 +616,104 @@ public class StageUtilsTest extends EasyMockSupport {
     }
   }
 
+  @Test
+  public void testUseAmbariJdkWithoutavaHome() {
+    // GIVEN
+    Map<String, String> commandParams = new HashMap<>();
+    Configuration configuration = new Configuration();
+    // WHEN
+    StageUtils.useAmbariJdkInCommandParams(commandParams, configuration);
+    // THEN
+    assertTrue(commandParams.isEmpty());
+  }
+
+  @Test
+  public void testUseAmbariJdkWithCustomJavaHome() {
+    // GIVEN
+    Map<String, String> commandParams = new HashMap<>();
+    Configuration configuration = new Configuration();
+    configuration.setProperty("java.home", "myJavaHome");
+    // WHEN
+    StageUtils.useAmbariJdkInCommandParams(commandParams, configuration);
+    // THEN
+    assertEquals("myJavaHome", commandParams.get("ambari_java_home"));
+    assertEquals(2, commandParams.size());
+  }
+
+  @Test
+  public void testUseAmbariJdk() {
+    // GIVEN
+    Map<String, String> commandParams = new HashMap<>();
+    Configuration configuration = new Configuration();
+    configuration.setProperty("java.home", "myJavaHome");
+    configuration.setProperty("jdk.name", "myJdkName");
+    configuration.setProperty("jce.name", "myJceName");
+    // WHEN
+    StageUtils.useAmbariJdkInCommandParams(commandParams, configuration);
+    // THEN
+    assertEquals("myJavaHome", commandParams.get("ambari_java_home"));
+    assertEquals("myJdkName", commandParams.get("ambari_jdk_name"));
+    assertEquals("myJceName", commandParams.get("ambari_jce_name"));
+    assertEquals(4, commandParams.size());
+  }
+
+  @Test
+  public void testUseStackJdkIfExistsWithCustomStackJdk() {
+    // GIVEN
+    Map<String, String> hostLevelParams = new HashMap<>();
+    Configuration configuration = new Configuration();
+    configuration.setProperty("java.home", "myJavaHome");
+    configuration.setProperty("jdk.name", "myJdkName");
+    configuration.setProperty("jce.name", "myJceName");
+    configuration.setProperty("stack.java.home", "myStackJavaHome");
+    // WHEN
+    StageUtils.useStackJdkIfExists(hostLevelParams, configuration);
+    // THEN
+    assertEquals("myStackJavaHome", hostLevelParams.get("java_home"));
+    assertNull(hostLevelParams.get("jdk_name"));
+    assertNull(hostLevelParams.get("jce_name"));
+    assertEquals(4, hostLevelParams.size());
+  }
+
+  @Test
+  public void testUseStackJdkIfExists() {
+    // GIVEN
+    Map<String, String> hostLevelParams = new HashMap<>();
+    Configuration configuration = new Configuration();
+    configuration.setProperty("java.home", "myJavaHome");
+    configuration.setProperty("jdk.name", "myJdkName");
+    configuration.setProperty("jce.name", "myJceName");
+    configuration.setProperty("stack.java.home", "myStackJavaHome");
+    configuration.setProperty("stack.jdk.name", "myStackJdkName");
+    configuration.setProperty("stack.jce.name", "myStackJceName");
+    configuration.setProperty("stack.java.version", "7");
+    // WHEN
+    StageUtils.useStackJdkIfExists(hostLevelParams, configuration);
+    // THEN
+    assertEquals("myStackJavaHome", hostLevelParams.get("java_home"));
+    assertEquals("myStackJdkName", hostLevelParams.get("jdk_name"));
+    assertEquals("myStackJceName", hostLevelParams.get("jce_name"));
+    assertEquals("7", hostLevelParams.get("java_version"));
+    assertEquals(4, hostLevelParams.size());
+  }
+
+  @Test
+  public void testUseStackJdkIfExistsWithoutStackJdk() {
+    // GIVEN
+    Map<String, String> hostLevelParams = new HashMap<>();
+    Configuration configuration = new Configuration();
+    configuration.setProperty("java.home", "myJavaHome");
+    configuration.setProperty("jdk.name", "myJdkName");
+    configuration.setProperty("jce.name", "myJceName");
+    // WHEN
+    StageUtils.useStackJdkIfExists(hostLevelParams, configuration);
+    // THEN
+    assertEquals("myJavaHome", hostLevelParams.get("java_home"));
+    assertEquals("myJdkName", hostLevelParams.get("jdk_name"));
+    assertEquals("myJceName", hostLevelParams.get("jce_name"));
+    assertEquals(4, hostLevelParams.size());
+  }
+
   private void checkServiceHostIndexes(Map<String, Set<String>> info, String componentName, String mappedComponentName,
                                        Map<String, Collection<String>> serviceTopology, List<String> hostList) {
     Set<Integer> expectedHostsList = new HashSet<>();

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/test/python/TestAmbariServer.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestAmbariServer.py b/ambari-server/src/test/python/TestAmbariServer.py
index 8c135c3..c511237 100644
--- a/ambari-server/src/test/python/TestAmbariServer.py
+++ b/ambari-server/src/test/python/TestAmbariServer.py
@@ -108,11 +108,11 @@ with patch.object(platform, "linux_distribution", return_value = MagicMock(retur
                   get_pass_file_path, GET_FQDN_SERVICE_URL, JDBC_USE_INTEGRATED_AUTH_PROPERTY, SECURITY_KEY_ENV_VAR_NAME, \
                   JAVA_HOME_PROPERTY, JDK_NAME_PROPERTY, JCE_NAME_PROPERTY, STACK_LOCATION_KEY, SERVER_VERSION_FILE_PATH, \
                   COMMON_SERVICES_PATH_PROPERTY, WEBAPP_DIR_PROPERTY, SHARED_RESOURCES_DIR, BOOTSTRAP_SCRIPT, \
-                  CUSTOM_ACTION_DEFINITIONS, BOOTSTRAP_SETUP_AGENT_SCRIPT, STACKADVISOR_SCRIPT, BOOTSTRAP_DIR_PROPERTY, MPACKS_STAGING_PATH_PROPERTY
+                  CUSTOM_ACTION_DEFINITIONS, BOOTSTRAP_SETUP_AGENT_SCRIPT, STACKADVISOR_SCRIPT, BOOTSTRAP_DIR_PROPERTY, MPACKS_STAGING_PATH_PROPERTY, STACK_JAVA_VERSION
                 from ambari_server.serverUtils import is_server_runing, refresh_stack_hash
                 from ambari_server.serverSetup import check_selinux, check_ambari_user, proceedJDBCProperties, SE_STATUS_DISABLED, SE_MODE_ENFORCING, configure_os_settings, \
                   download_and_install_jdk, prompt_db_properties, setup, \
-                  AmbariUserChecks, AmbariUserChecksLinux, AmbariUserChecksWindows, JDKSetup, reset, setup_jce_policy, expand_jce_zip_file
+                  AmbariUserChecks, AmbariUserChecksLinux, AmbariUserChecksWindows, JDKSetup, reset, setup_jce_policy, expand_jce_zip_file, check_ambari_java_version_is_valid
                 from ambari_server.serverUpgrade import upgrade, change_objects_owner, \
                   run_schema_upgrade, move_user_custom_actions, find_and_copy_custom_services
                 from ambari_server.setupHttps import is_valid_https_port, setup_https, import_cert_and_key_action, get_fqdn, \
@@ -2811,9 +2811,10 @@ class TestAmbariServer(TestCase):
   @patch("ambari_server.serverSetup.get_JAVA_HOME")
   @patch("ambari_server.serverSetup.get_resources_location")
   @patch("ambari_server.serverSetup.get_ambari_properties")
+  @patch("ambari_server.serverSetup.check_ambari_java_version_is_valid")
   @patch("shutil.copyfile")
   @patch("sys.exit")
-  def test_download_jdk(self, exit_mock, copyfile_mock, get_ambari_properties_mock, get_resources_location_mock, get_JAVA_HOME_mock, \
+  def test_download_jdk(self, exit_mock, copyfile_mock, check_ambari_java_version_is_valid_mock, get_ambari_properties_mock, get_resources_location_mock, get_JAVA_HOME_mock, \
                         validate_jdk_mock, print_info_msg_mock, get_validated_string_input_mock, update_properties_mock, \
                         run_os_command_mock, get_YN_input_mock, force_download_file_mock, expand_jce_zip_file_mock,
                         adjust_jce_permissions_mock, os_makedirs_mock,
@@ -2874,6 +2875,7 @@ class TestAmbariServer(TestCase):
     get_JAVA_HOME_mock.return_value = False
     read_ambari_user_mock.return_value = "ambari"
     get_ambari_properties_mock.return_value = p
+    check_ambari_java_version_is_valid_mock.return_value = True
     # Test case: ambari.properties not found
     try:
       download_and_install_jdk(args)
@@ -3166,6 +3168,47 @@ class TestAmbariServer(TestCase):
     pass
 
   @not_for_platform(PLATFORM_WINDOWS)
+  @patch("subprocess.Popen")
+  def test_check_ambari_java_version_is_valid(self, popenMock):
+    # case 1:  jdk7 is picked for stacks
+    properties = Properties()
+    p = MagicMock()
+    p.communicate.return_value = ('7', None)
+    p.returncode = 0
+    popenMock.return_value = p
+    result = check_ambari_java_version_is_valid('/usr/jdk64/jdk_1.7.0/', 'java', 8, properties)
+    self.assertEqual(properties.get_property(STACK_JAVA_VERSION), "7")
+    self.assertFalse(result)
+
+    # case 2: jdk8 is picked for stacks
+    properties = Properties()
+    p.communicate.return_value = ('8', None)
+    p.returncode = 0
+    result = check_ambari_java_version_is_valid('/usr/jdk64/jdk_1.8.0/', 'java', 8, properties)
+    self.assertFalse(properties.get_property(STACK_JAVA_VERSION))
+    self.assertTrue(result)
+
+    # case 3: return code is not 0
+    p.returncode = 1
+    try:
+      check_ambari_java_version_is_valid('/usr/jdk64/jdk_1.8.0/', 'java', 8, properties)
+      self.fail("Should throw exception")
+    except FatalException:
+      # expected
+      pass
+
+    # case 4: unparseable response - type error
+    p.communicate.return_value = ('something else', None)
+    p.returncode = 0
+    try:
+      check_ambari_java_version_is_valid('/usr/jdk64/jdk_1.8.0/', 'java', 8, properties)
+      self.fail("Should throw exception")
+    except FatalException as e:
+      # expected
+      self.assertEqual(e.code, 1)
+      pass
+
+  @not_for_platform(PLATFORM_WINDOWS)
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
   @patch.object(LinuxDBMSConfig, "_setup_remote_server")
   @patch("ambari_server.dbConfiguration_linux.print_info_msg")


[07/33] ambari git commit: AMBARI-21352. Workflow Manager view build failure (Venkata Sairam)

Posted by rl...@apache.org.
AMBARI-21352. Workflow Manager view build failure (Venkata Sairam)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1e295908
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1e295908
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1e295908

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 1e2959086fa2151888f7c1cfddaf3c6a2cedb25f
Parents: b1a1543
Author: Venkata Sairam <ve...@gmail.com>
Authored: Tue Jun 27 15:53:12 2017 +0530
Committer: Venkata Sairam <ve...@gmail.com>
Committed: Tue Jun 27 15:53:12 2017 +0530

----------------------------------------------------------------------
 contrib/views/wfmanager/src/main/resources/ui/bower.json | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/1e295908/contrib/views/wfmanager/src/main/resources/ui/bower.json
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/bower.json b/contrib/views/wfmanager/src/main/resources/ui/bower.json
index 06fc3e3..3f9de44 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/bower.json
+++ b/contrib/views/wfmanager/src/main/resources/ui/bower.json
@@ -21,11 +21,11 @@
     "abdmob/x2js": "~1.2.0",
     "datatables": "~1.10.11",
     "vkBeautify": "https://github.com/vkiryukhin/vkBeautify.git",
-    "cytoscape": "~2.7.7",
+    "cytoscape": "2.7.20",
     "cytoscape-dagre": "~1.3.0",
     "cytoscape-panzoom": "~2.4.0",
     "codemirror": "~5.15.0",
     "fuse.js": "~2.5.0",
-    "jsog":"1.0.7"
+    "jsog": "1.0.7"
   }
 }


[22/33] ambari git commit: AMBARI-21362. Ambari upgrade not idempotent due to column move

Posted by rl...@apache.org.
AMBARI-21362. Ambari upgrade not idempotent due to column move


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5c874ccb
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5c874ccb
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5c874ccb

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 5c874ccb40b282e5074588906cb7de1f7eeae614
Parents: a3681c0
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Tue Jun 27 14:33:27 2017 +0200
Committer: Attila Doroszlai <ad...@hortonworks.com>
Committed: Wed Jun 28 18:31:49 2017 +0200

----------------------------------------------------------------------
 .../ambari/server/orm/DBAccessorImpl.java       |  5 +++-
 .../ambari/server/orm/DBAccessorImplTest.java   | 29 ++++++++++++++++++++
 2 files changed, 33 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/5c874ccb/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
index 13e7d7d..83ea8e1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
@@ -1411,7 +1411,10 @@ public class DBAccessorImpl implements DBAccessor {
   public void moveColumnToAnotherTable(String sourceTableName, DBColumnInfo sourceColumn, String sourceIDFieldName,
               String targetTableName, DBColumnInfo targetColumn, String targetIDFieldName, Object initialValue) throws SQLException {
 
-    if (this.tableHasColumn(sourceTableName, sourceIDFieldName)) {
+    if (tableHasColumn(sourceTableName, sourceIDFieldName) &&
+      tableHasColumn(sourceTableName, sourceColumn.getName()) &&
+      tableHasColumn(targetTableName, targetIDFieldName)
+    ) {
 
       final String moveSQL = dbmsHelper.getCopyColumnToAnotherTableStatement(sourceTableName, sourceColumn.getName(),
         sourceIDFieldName, targetTableName, targetColumn.getName(),targetIDFieldName);

http://git-wip-us.apache.org/repos/asf/ambari/blob/5c874ccb/ambari-server/src/test/java/org/apache/ambari/server/orm/DBAccessorImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/DBAccessorImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/DBAccessorImplTest.java
index ca2674c..b4ffbf1 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/DBAccessorImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/DBAccessorImplTest.java
@@ -638,4 +638,33 @@ public class DBAccessorImplTest {
     }
 
    }
+
+  @Test
+  public void testMoveNonexistentColumnIsNoop() throws Exception {
+    DBAccessorImpl dbAccessor = injector.getInstance(DBAccessorImpl.class);
+    String sourceTableName = getFreeTableName();
+    String targetTableName = getFreeTableName();
+    int testRowAmount = 10;
+
+    createMyTable(sourceTableName, "col1");
+    createMyTable(targetTableName, "col1", "col2");
+
+    for (Integer i=0; i < testRowAmount; i++){
+      dbAccessor.insertRow(sourceTableName,
+        new String[] {"id", "col1"},
+        new String[]{i.toString(), String.format("'source,1,%s'", i)}, false);
+
+      dbAccessor.insertRow(targetTableName,
+        new String[] {"id", "col1", "col2"},
+        new String[]{i.toString(), String.format("'target,1,%s'", i), String.format("'target,2,%s'", i)}, false);
+    }
+
+    DBColumnInfo sourceColumn = new DBColumnInfo("col2", String.class, null, null, false);
+    DBColumnInfo targetColumn = new DBColumnInfo("col2", String.class, null, null, false);
+
+    dbAccessor.moveColumnToAnotherTable(sourceTableName, sourceColumn, "id",
+      targetTableName, targetColumn, "id", "initial");
+
+    // should not result in exception due to unknown column in source table
+  }
 }


[06/33] ambari git commit: AMBARI-21388. Styling issues with newly implemented workflow manager file browser (Venkata Sairam)

Posted by rl...@apache.org.
AMBARI-21388. Styling issues with newly implemented workflow manager file browser (Venkata Sairam)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b1a15435
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b1a15435
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b1a15435

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: b1a154358078b02a4d84bbf865ff0209c1912e87
Parents: 9833bc1
Author: Venkata Sairam <ve...@gmail.com>
Authored: Tue Jun 27 14:58:59 2017 +0530
Committer: Venkata Sairam <ve...@gmail.com>
Committed: Tue Jun 27 14:59:25 2017 +0530

----------------------------------------------------------------------
 .../src/main/resources/ui/app/styles/app.less   |  4 +-
 .../hdfs-directory-viewer/addon/styles/app.css  |  1 +
 .../wfmanager/src/main/resources/ui/yarn.lock   | 68 +++++++++++++++-----
 3 files changed, 54 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b1a15435/contrib/views/wfmanager/src/main/resources/ui/app/styles/app.less
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/styles/app.less b/contrib/views/wfmanager/src/main/resources/ui/app/styles/app.less
index 597e2e8..9a35aca 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/styles/app.less
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/styles/app.less
@@ -1060,8 +1060,8 @@ input:invalid {
   width: 100%;
 }
 .hdfs-browse{
-  height: 500px;
-  max-height: 500px;
+  height: 350px;
+  max-height: 350px;
   overflow: scroll;
 }
 #wf_title{

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1a15435/contrib/views/wfmanager/src/main/resources/ui/externaladdons/hdfs-directory-viewer/addon/styles/app.css
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/externaladdons/hdfs-directory-viewer/addon/styles/app.css b/contrib/views/wfmanager/src/main/resources/ui/externaladdons/hdfs-directory-viewer/addon/styles/app.css
index b46fa34..e0b4463 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/externaladdons/hdfs-directory-viewer/addon/styles/app.css
+++ b/contrib/views/wfmanager/src/main/resources/ui/externaladdons/hdfs-directory-viewer/addon/styles/app.css
@@ -83,6 +83,7 @@
 	width:500px;
 	position:relative;
 	overflow:auto;
+	float:left;
 }
 .directory-viewer .padding-left-10px {
 	padding-left: 10px;

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1a15435/contrib/views/wfmanager/src/main/resources/ui/yarn.lock
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/yarn.lock b/contrib/views/wfmanager/src/main/resources/ui/yarn.lock
index e9ad6cc..f3602c9 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/yarn.lock
+++ b/contrib/views/wfmanager/src/main/resources/ui/yarn.lock
@@ -66,10 +66,6 @@ amdefine@>=0.0.4:
   version "1.0.1"
   resolved "https://registry.yarnpkg.com/amdefine/-/amdefine-1.0.1.tgz#4a5282ac164729e93619bcfd3ad151f817ce91f5"
 
-ansi-regex@*, ansi-regex@^2.0.0:
-  version "2.1.1"
-  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df"
-
 ansi-regex@^0.2.0, ansi-regex@^0.2.1:
   version "0.2.1"
   resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-0.2.1.tgz#0d8e946967a3d8143f93e24e298525fc1b2235f9"
@@ -78,6 +74,10 @@ ansi-regex@^1.0.0:
   version "1.1.1"
   resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-1.1.1.tgz#41c847194646375e6a1a5d10c3ca054ef9fc980d"
 
+ansi-regex@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df"
+
 ansi-styles@^1.1.0:
   version "1.1.0"
   resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-1.1.0.tgz#eaecbf66cd706882760b2f4691582b8f55d7a7de"
@@ -1111,6 +1111,19 @@ broccoli-file-creator@^1.0.0, broccoli-file-creator@^1.0.1:
     rsvp "~3.0.6"
     symlink-or-copy "^1.0.1"
 
+broccoli-filter@^0.1.6:
+  version "0.1.14"
+  resolved "https://registry.yarnpkg.com/broccoli-filter/-/broccoli-filter-0.1.14.tgz#23cae3891ff9ebb7b4d7db00c6dcf03535daf7ad"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.2.6"
+    broccoli-writer "^0.1.1"
+    mkdirp "^0.3.5"
+    promise-map-series "^0.2.1"
+    quick-temp "^0.1.2"
+    rsvp "^3.0.16"
+    symlink-or-copy "^1.0.1"
+    walk-sync "^0.1.3"
+
 broccoli-filter@^1.2.2, broccoli-filter@^1.2.3:
   version "1.2.4"
   resolved "https://registry.yarnpkg.com/broccoli-filter/-/broccoli-filter-1.2.4.tgz#409afb94b9a3a6da9fac8134e91e205f40cc7330"
@@ -1159,7 +1172,7 @@ broccoli-jshint@^1.0.0:
     json-stable-stringify "^1.0.0"
     mkdirp "~0.4.0"
 
-broccoli-kitchen-sink-helpers@^0.2.5, broccoli-kitchen-sink-helpers@~0.2.0:
+broccoli-kitchen-sink-helpers@^0.2.5, broccoli-kitchen-sink-helpers@^0.2.6, broccoli-kitchen-sink-helpers@~0.2.0:
   version "0.2.9"
   resolved "https://registry.yarnpkg.com/broccoli-kitchen-sink-helpers/-/broccoli-kitchen-sink-helpers-0.2.9.tgz#a5e0986ed8d76fb5984b68c3f0450d3a96e36ecc"
   dependencies:
@@ -1296,7 +1309,7 @@ broccoli-viz@^2.0.1:
   version "2.0.1"
   resolved "https://registry.yarnpkg.com/broccoli-viz/-/broccoli-viz-2.0.1.tgz#3f3ed2fb83e368aa5306fae460801dea552e40db"
 
-broccoli-writer@~0.1.1:
+broccoli-writer@^0.1.1, broccoli-writer@~0.1.1:
   version "0.1.1"
   resolved "https://registry.yarnpkg.com/broccoli-writer/-/broccoli-writer-0.1.1.tgz#d4d71aa8f2afbc67a3866b91a2da79084b96ab2d"
   dependencies:
@@ -1935,7 +1948,7 @@ ember-cli-app-version@^1.0.0:
     ember-cli-htmlbars "^1.0.0"
     git-repo-version "0.3.0"
 
-ember-cli-babel@^5.0.0, ember-cli-babel@^5.1.10, ember-cli-babel@^5.1.3, ember-cli-babel@^5.1.5, ember-cli-babel@^5.1.6, ember-cli-babel@^5.1.7:
+ember-cli-babel@^5.0.0, ember-cli-babel@^5.1.10, ember-cli-babel@^5.1.3, ember-cli-babel@^5.1.5, ember-cli-babel@^5.1.6, ember-cli-babel@^5.1.7, ember-cli-babel@^5.2.4:
   version "5.2.4"
   resolved "https://registry.yarnpkg.com/ember-cli-babel/-/ember-cli-babel-5.2.4.tgz#5ce4f46b08ed6f6d21e878619fb689719d6e8e13"
   dependencies:
@@ -1983,6 +1996,13 @@ ember-cli-htmlbars-inline-precompile@^0.3.1:
     ember-cli-htmlbars "^1.0.0"
     hash-for-dep "^1.0.2"
 
+ember-cli-htmlbars@0.7.9:
+  version "0.7.9"
+  resolved "https://registry.yarnpkg.com/ember-cli-htmlbars/-/ember-cli-htmlbars-0.7.9.tgz#142cd4325ab3f48c76cf8dc4d3a3800f38e721be"
+  dependencies:
+    broccoli-filter "^0.1.6"
+    ember-cli-version-checker "^1.0.2"
+
 ember-cli-htmlbars@^1.0.0, ember-cli-htmlbars@^1.0.1:
   version "1.3.0"
   resolved "https://registry.yarnpkg.com/ember-cli-htmlbars/-/ember-cli-htmlbars-1.3.0.tgz#e090f011239153bf45dab29625f94a46fce205af"
@@ -2188,6 +2208,14 @@ ember-cli@2.3.0:
     walk-sync "^0.2.6"
     yam "0.0.18"
 
+"ember-collection@git://github.com/emberjs/ember-collection.git#4dbe10b7498886e277fc21b28139924f908d1926":
+  version "1.0.0-alpha.4"
+  resolved "git://github.com/emberjs/ember-collection.git#4dbe10b7498886e277fc21b28139924f908d1926"
+  dependencies:
+    ember-cli-babel "^5.1.3"
+    ember-cli-htmlbars "0.7.9"
+    layout-bin-packer "^1.2.0"
+
 ember-cp-validations@2.9.5:
   version "2.9.5"
   resolved "https://registry.yarnpkg.com/ember-cp-validations/-/ember-cp-validations-2.9.5.tgz#d3e81f6c6365f87e833af9c1f6fc8f35974f68d2"
@@ -3502,6 +3530,12 @@ klaw@^1.0.0:
   optionalDependencies:
     graceful-fs "^4.1.9"
 
+layout-bin-packer@^1.2.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/layout-bin-packer/-/layout-bin-packer-1.3.0.tgz#6f232f67db7606b2a405f39ae7197f2931a26c0c"
+  dependencies:
+    ember-cli-babel "^5.2.4"
+
 lazy-cache@^1.0.3:
   version "1.0.4"
   resolved "https://registry.yarnpkg.com/lazy-cache/-/lazy-cache-1.0.4.tgz#a1d78fc3a50474cb80845d3b3b6e1da49a446e8e"
@@ -3948,6 +3982,10 @@ mkdirp@0.5.x, "mkdirp@>=0.5 0", mkdirp@^0.5.0, mkdirp@^0.5.1, mkdirp@~0.5.0, mkd
   dependencies:
     minimist "0.0.8"
 
+mkdirp@^0.3.5:
+  version "0.3.5"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.3.5.tgz#de3e5f8961c88c787ee1368df849ac4413eca8d7"
+
 mkdirp@~0.4.0:
   version "0.4.2"
   resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.4.2.tgz#427c8c18ece398b932f6f666f4e1e5b7740e78c8"
@@ -5066,10 +5104,6 @@ spdx-expression-parse@~1.0.0:
   version "1.0.4"
   resolved "https://registry.yarnpkg.com/spdx-expression-parse/-/spdx-expression-parse-1.0.4.tgz#9bdf2f20e1f40ed447fbe273266191fced51626c"
 
-spdx-license-ids@*:
-  version "2.0.1"
-  resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-2.0.1.tgz#02017bcc3534ee4ffef6d58d20e7d3e9a1c3c8ec"
-
 spdx-license-ids@^1.0.0, spdx-license-ids@^1.0.2:
   version "1.2.2"
   resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-1.2.2.tgz#c9df7a3424594ade6bd11900d596696dc06bac57"
@@ -5141,12 +5175,6 @@ stringstream@~0.0.4:
   version "0.0.5"
   resolved "https://registry.yarnpkg.com/stringstream/-/stringstream-0.0.5.tgz#4e484cd4de5a0bbbee18e46307710a8a81621878"
 
-strip-ansi@*, strip-ansi@^3.0.0, strip-ansi@^3.0.1:
-  version "3.0.1"
-  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf"
-  dependencies:
-    ansi-regex "^2.0.0"
-
 strip-ansi@^0.3.0:
   version "0.3.0"
   resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-0.3.0.tgz#25f48ea22ca79187f3174a4db8759347bb126220"
@@ -5159,6 +5187,12 @@ strip-ansi@^2.0.1:
   dependencies:
     ansi-regex "^1.0.0"
 
+strip-ansi@^3.0.0, strip-ansi@^3.0.1:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf"
+  dependencies:
+    ansi-regex "^2.0.0"
+
 strip-ansi@~0.1.0:
   version "0.1.1"
   resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-0.1.1.tgz#39e8a98d044d150660abe4a6808acf70bb7bc991"


[13/33] ambari git commit: AMBARI-21317. Config update API should not need to have a unique tag; BE can auto-add the tag when it is missing. (vbrodetskyi)

Posted by rl...@apache.org.
AMBARI-21317. Config update API should not need to have a unique tag; BE can auto-add the tag when it is missing. (vbrodetskyi)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/40e6352b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/40e6352b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/40e6352b

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 40e6352b0e704ca1af7b0e88a267b03bde5cea59
Parents: 8634718
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Tue Jun 27 22:44:28 2017 +0300
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Tue Jun 27 22:44:28 2017 +0300

----------------------------------------------------------------------
 .../controller/AmbariManagementController.java  |  4 +++
 .../AmbariManagementControllerImpl.java         | 29 +++++++++++++++---
 .../server/controller/ConfigGroupResponse.java  | 10 +++++++
 .../internal/ConfigGroupResourceProvider.java   | 31 ++++++++++++++++++--
 .../apache/ambari/server/state/ConfigImpl.java  |  3 +-
 .../AmbariManagementControllerImplTest.java     | 16 ++++++++--
 .../ConfigGroupResourceProviderTest.java        |  2 ++
 7 files changed, 85 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/40e6352b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
index bb1c95e..f0f13e1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
@@ -911,5 +911,9 @@ public interface AmbariManagementController {
    */
   QuickLinkVisibilityController getQuicklinkVisibilityController();
 
+  ConfigGroupResponse getConfigGroupUpdateResults(ConfigGroupRequest configGroupRequest);
+
+  void saveConfigGroupUpdate(ConfigGroupRequest configGroupRequest, ConfigGroupResponse configGroupResponse);
+
 }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/40e6352b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 6781f65..77883e3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -65,6 +65,7 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeMap;
+import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 
 import javax.persistence.RollbackException;
@@ -346,6 +347,8 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
 
   private Cache<ClusterRequest, ClusterResponse> clusterUpdateCache =
       CacheBuilder.newBuilder().expireAfterWrite(5, TimeUnit.MINUTES).build();
+  private Cache<ConfigGroupRequest, ConfigGroupResponse> configGroupUpdateCache =
+          CacheBuilder.newBuilder().expireAfterWrite(5, TimeUnit.MINUTES).build();
 
   @Inject
   private AmbariCustomCommandExecutionHelper customCommandExecutionHelper;
@@ -1632,6 +1635,15 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
       cluster = clusters.getClusterById(request.getClusterId());
     }
 
+    List<ConfigurationRequest> desiredConfigs = request.getDesiredConfig();
+    if (desiredConfigs != null) {
+      for (ConfigurationRequest configurationRequest : desiredConfigs) {
+        if (StringUtils.isEmpty(configurationRequest.getVersionTag())) {
+          configurationRequest.setVersionTag(UUID.randomUUID().toString());
+        }
+      }
+    }
+
     // Ensure the user has access to update this cluster
     AuthorizationHelper.verifyAuthorization(ResourceType.CLUSTER, cluster.getResourceId(), RoleAuthorization.AUTHORIZATIONS_UPDATE_CLUSTER);
 
@@ -1640,7 +1652,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
       new LinkedList<>();
     ServiceConfigVersionResponse serviceConfigVersionResponse = null;
 
-    if (request.getDesiredConfig() != null && request.getServiceConfigVersionRequest() != null) {
+    if (desiredConfigs != null && request.getServiceConfigVersionRequest() != null) {
       String msg = "Unable to set desired configs and rollback at same time, request = " + request;
       LOG.error(msg);
       throw new IllegalArgumentException(msg);
@@ -1661,8 +1673,8 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
 
     //check if desired configs are available in request and they were changed
     boolean isConfigurationCreationNeeded = false;
-    if (request.getDesiredConfig() != null) {
-      for (ConfigurationRequest desiredConfig : request.getDesiredConfig()) {
+    if (desiredConfigs != null) {
+      for (ConfigurationRequest desiredConfig : desiredConfigs) {
         Map<String, String> requestConfigProperties = desiredConfig.getProperties();
         Map<String,Map<String,String>> requestConfigAttributes = desiredConfig.getPropertiesAttributes();
 
@@ -1739,7 +1751,6 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
 
     // set or create configuration mapping (and optionally create the map of properties)
     if (isConfigurationCreationNeeded) {
-      List<ConfigurationRequest> desiredConfigs = request.getDesiredConfig();
 
       if (!desiredConfigs.isEmpty()) {
         Set<Config> configs = new HashSet<>();
@@ -2073,6 +2084,16 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
   }
 
   @Override
+  public ConfigGroupResponse getConfigGroupUpdateResults(ConfigGroupRequest configGroupRequest) {
+    return configGroupUpdateCache.getIfPresent(configGroupRequest);
+  }
+
+  @Override
+  public void saveConfigGroupUpdate(ConfigGroupRequest configGroupRequest, ConfigGroupResponse configGroupResponse) {
+    configGroupUpdateCache.put(configGroupRequest, configGroupResponse);
+  }
+
+  @Override
   public String getJobTrackerHost(Cluster cluster) {
     try {
       Service svc = cluster.getService("MAPREDUCE");

http://git-wip-us.apache.org/repos/asf/ambari/blob/40e6352b/ambari-server/src/main/java/org/apache/ambari/server/controller/ConfigGroupResponse.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ConfigGroupResponse.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ConfigGroupResponse.java
index 58c680d..937df46 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ConfigGroupResponse.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ConfigGroupResponse.java
@@ -17,6 +17,7 @@
  */
 package org.apache.ambari.server.controller;
 
+import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 
@@ -28,6 +29,7 @@ public class ConfigGroupResponse {
   private String description;
   private Set<Map<String, Object>> hosts;
   private Set<Map<String, Object>> configVersions;
+  private Set<Map<String, Object>> versionTags =  new HashSet<Map<String, Object>>();
 
   public ConfigGroupResponse(Long id, String clusterName,
           String groupName, String tag, String description,
@@ -97,4 +99,12 @@ public class ConfigGroupResponse {
   public void setConfigurations(Set<Map<String, Object>> configurations) {
     this.configVersions = configurations;
   }
+
+  public Set<Map<String, Object>> getVersionTags() {
+    return versionTags;
+  }
+
+  public void setVersionTags(Set<Map<String, Object>> versionTags) {
+    this.versionTags = versionTags;
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/40e6352b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java
index c2b998c..25af9d2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java
@@ -96,6 +96,8 @@ public class ConfigGroupResourceProvider extends
     .getPropertyId("ConfigGroup", "hosts");
   public static final String CONFIGGROUP_CONFIGS_PROPERTY_ID =
     PropertyHelper.getPropertyId("ConfigGroup", "desired_configs");
+  public static final String CONFIGGROUP_VERSION_TAGS_PROPERTY_ID =
+    PropertyHelper.getPropertyId("ConfigGroup", "version_tags");
 
   private static Set<String> pkPropertyIds = new HashSet<>(Arrays
     .asList(new String[]{CONFIGGROUP_ID_PROPERTY_ID}));
@@ -217,9 +219,23 @@ public class ConfigGroupResourceProvider extends
 
     RequestStatus status = updateResources(requests);
 
+    Set<Resource> associatedResources = new HashSet<>();
+    for (ConfigGroupRequest configGroupRequest : requests) {
+      ConfigGroupResponse configGroupResponse = getManagementController().getConfigGroupUpdateResults(configGroupRequest);
+      Resource resource = new ResourceImpl(Resource.Type.ConfigGroup);
+
+      resource.setProperty(CONFIGGROUP_ID_PROPERTY_ID, configGroupResponse.getId());
+      resource.setProperty(CONFIGGROUP_CLUSTER_NAME_PROPERTY_ID, configGroupResponse.getClusterName());
+      resource.setProperty(CONFIGGROUP_NAME_PROPERTY_ID, configGroupResponse.getGroupName());
+      resource.setProperty(CONFIGGROUP_TAG_PROPERTY_ID, configGroupResponse.getTag());
+      resource.setProperty(CONFIGGROUP_VERSION_TAGS_PROPERTY_ID, configGroupResponse.getVersionTags());
+
+      associatedResources.add(resource);
+    }
+
     notifyUpdate(Resource.Type.ConfigGroup, request, predicate);
 
-    return status;
+    return getRequestStatus(null, associatedResources);
   }
 
   @Override
@@ -701,7 +717,18 @@ public class ConfigGroupResourceProvider extends
 
       if (serviceName != null) {
         cluster.createServiceConfigVersion(serviceName, getManagementController().getAuthName(),
-          request.getServiceConfigVersionNote(), configGroup);
+                request.getServiceConfigVersionNote(), configGroup);
+
+        ConfigGroupResponse configGroupResponse = new ConfigGroupResponse(configGroup.getId(), cluster.getClusterName(), configGroup.getName(),
+                request.getTag(), "", new HashSet<Map<String, Object>>(), new HashSet<Map<String, Object>>());
+        Set<Map<String, Object>> versionTags = new HashSet<Map<String, Object>>();
+        Map<String, Object> tagsMap = new HashMap<String, Object>();
+        for (Config config : configGroup.getConfigurations().values()) {
+          tagsMap.put(config.getType(), config.getTag());
+        }
+        versionTags.add(tagsMap);
+        configGroupResponse.setVersionTags(versionTags);
+        getManagementController().saveConfigGroupUpdate(request, configGroupResponse);
       } else {
         LOG.warn("Could not determine service name for config group {}, service config version not created",
             configGroup.getId());

http://git-wip-us.apache.org/repos/asf/ambari/blob/40e6352b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
index 65b7863..cfcadd4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
@@ -22,6 +22,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.UUID;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.locks.ReadWriteLock;
 
@@ -133,7 +134,7 @@ public class ConfigImpl implements Config {
     version = cluster.getNextConfigVersion(type);
 
     // tag is nullable from factory but not in the DB, so ensure we generate something
-    tag = StringUtils.isBlank(tag) ? GENERATED_TAG_PREFIX + version : tag;
+    tag = StringUtils.isBlank(tag) ? UUID.randomUUID().toString() : tag;
     this.tag = tag;
 
     ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());

http://git-wip-us.apache.org/repos/asf/ambari/blob/40e6352b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
index c0e3ef1..eadc678 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
@@ -561,9 +561,14 @@ public class AmbariManagementControllerImplTest {
     Cluster cluster = createNiceMock(Cluster.class);
     ActionManager actionManager = createNiceMock(ActionManager.class);
     ClusterRequest clusterRequest = createNiceMock(ClusterRequest.class);
+    ConfigurationRequest configurationRequest = createNiceMock(ConfigurationRequest.class);
 
     // requests
-    Set<ClusterRequest> setRequests = Collections.singleton(clusterRequest);
+    Set<ClusterRequest> setRequests = new HashSet<ClusterRequest>();
+    setRequests.add(clusterRequest);
+
+    List<ConfigurationRequest> configRequests = new ArrayList<>();
+    configRequests.add(configurationRequest);
 
     KerberosHelper kerberosHelper = createStrictMock(KerberosHelper.class);
     // expectations
@@ -573,6 +578,8 @@ public class AmbariManagementControllerImplTest {
     expect(injector.getInstance(KerberosHelper.class)).andReturn(kerberosHelper);
     expect(clusterRequest.getClusterName()).andReturn("clusterNew").times(3);
     expect(clusterRequest.getClusterId()).andReturn(1L).times(6);
+    expect(clusterRequest.getDesiredConfig()).andReturn(configRequests);
+    expect(configurationRequest.getVersionTag()).andReturn(null).times(1);
     expect(clusters.getClusterById(1L)).andReturn(cluster).times(2);
     expect(cluster.getClusterName()).andReturn("clusterOld").times(1);
 
@@ -582,8 +589,11 @@ public class AmbariManagementControllerImplTest {
     cluster.setClusterName("clusterNew");
     expectLastCall();
 
+    configurationRequest.setVersionTag(EasyMock.anyObject(String.class));
+    expectLastCall();
+
     // replay mocks
-    replay(actionManager, cluster, clusters, injector, clusterRequest, sessionManager);
+    replay(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, configurationRequest);
 
     // test
     AmbariManagementController controller = new AmbariManagementControllerImpl(actionManager, clusters, injector);
@@ -591,7 +601,7 @@ public class AmbariManagementControllerImplTest {
 
     // assert and verify
     assertSame(controller, controllerCapture.getValue());
-    verify(actionManager, cluster, clusters, injector, clusterRequest, sessionManager);
+    verify(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, configurationRequest);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/40e6352b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProviderTest.java
index 12cbadf..6dd0748 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProviderTest.java
@@ -495,6 +495,8 @@ public class ConfigGroupResourceProviderTest {
     expect(hostEntity2.getHostId()).andReturn(2L).atLeastOnce();
     expect(h1.getHostId()).andReturn(1L).anyTimes();
     expect(h2.getHostId()).andReturn(2L).anyTimes();
+    expect(managementController.getConfigGroupUpdateResults((ConfigGroupRequest)anyObject())).
+            andReturn(new ConfigGroupResponse(1L, "", "", "", "", new HashSet<Map<String, Object>>(), new HashSet<Map<String, Object>>())).atLeastOnce();
 
     expect(configGroup.getName()).andReturn("test-1").anyTimes();
     expect(configGroup.getId()).andReturn(25L).anyTimes();


[20/33] ambari git commit: AMBARI-21274: Typo in stack advisor error message for yarn and mr queue config issues (sangeetar)

Posted by rl...@apache.org.
AMBARI-21274: Typo in stack advisor error message for yarn and mr queue config issues (sangeetar)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/34462831
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/34462831
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/34462831

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 34462831450c3f592b3343940c2c623f9bb7489b
Parents: aac9fe6
Author: Sangeeta Ravindran <sa...@apache.org>
Authored: Wed Jun 28 09:16:48 2017 -0700
Committer: Sangeeta Ravindran <sa...@apache.org>
Committed: Wed Jun 28 09:16:48 2017 -0700

----------------------------------------------------------------------
 ambari-server/src/main/resources/stacks/stack_advisor.py           | 2 +-
 .../src/test/python/stacks/2.0.6/common/test_stack_advisor.py      | 2 +-
 .../src/test/python/stacks/2.5/common/test_stack_advisor.py        | 2 +-
 .../src/main/resources/stacks/ODPi/2.0/services/stack_advisor.py   | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/34462831/ambari-server/src/main/resources/stacks/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/stack_advisor.py b/ambari-server/src/main/resources/stacks/stack_advisor.py
index 3a39a34..8e08d82 100644
--- a/ambari-server/src/main/resources/stacks/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/stack_advisor.py
@@ -2752,7 +2752,7 @@ class DefaultStackAdvisor(StackAdvisor):
     if len(leaf_queue_names) == 0:
       return None
     elif queue_name not in leaf_queue_names:
-      return self.getErrorItem("Queue is not exist or not corresponds to existing YARN leaf queue")
+      return self.getErrorItem("Queue does not exist or correspond to an existing YARN leaf queue")
 
     return None
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/34462831/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
index b6f1965..0c4996b 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
@@ -534,7 +534,7 @@ class TestHDP206StackAdvisor(TestCase):
     hosts = self.prepareHosts([])
     result = self.stackAdvisor.validateConfigurations(services, hosts)
     expectedItems = [
-      {'message': 'Queue is not exist or not corresponds to existing YARN leaf queue', 'level': 'ERROR'}
+      {'message': 'Queue does not exist or correspond to an existing YARN leaf queue', 'level': 'ERROR'}
     ]
     self.assertValidationResult(expectedItems, result)
     services["configurations"]["yarn-env"]["properties"]["service_check.queue.name"] = "ndfqueue"

http://git-wip-us.apache.org/repos/asf/ambari/blob/34462831/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
index e62e00c..50f527d 100644
--- a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
@@ -5334,7 +5334,7 @@ class TestHDP25StackAdvisor(TestCase):
     hosts = self.prepareHosts([])
     result = self.stackAdvisor.validateConfigurations(services, hosts)
     expectedItems = [
-      {'message': 'Queue is not exist or not corresponds to existing YARN leaf queue', 'level': 'ERROR'}
+      {'message': 'Queue does not exist or correspond to an existing YARN leaf queue', 'level': 'ERROR'}
     ]
     self.assertValidationResult(expectedItems, result)
     services["configurations"]["yarn-env"]["properties"]["service_check.queue.name"] = "ndfqueue2"

http://git-wip-us.apache.org/repos/asf/ambari/blob/34462831/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/stack_advisor.py b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/stack_advisor.py
index 5a95fe3..5f70db2 100755
--- a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/stack_advisor.py
+++ b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/stack_advisor.py
@@ -1395,7 +1395,7 @@ class ODPi20StackAdvisor(DefaultStackAdvisor):
     if len(leaf_queue_names) == 0:
       return None
     elif queue_name not in leaf_queue_names:
-      return self.getErrorItem("Queue is not exist or not corresponds to existing YARN leaf queue")
+      return self.getErrorItem("Queue does not exist or correspond to an existing YARN leaf queue")
 
     return None
 


[19/33] ambari git commit: AMBARI-21352.Workflow Manager view build failure(Venkata Sairam)

Posted by rl...@apache.org.
AMBARI-21352.Workflow Manager view build failure(Venkata Sairam)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/aac9fe6e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/aac9fe6e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/aac9fe6e

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: aac9fe6e2e358b4ad1a1cfb0b86c3231897e38f1
Parents: 5e50042
Author: Venkata Sairam <ve...@gmail.com>
Authored: Wed Jun 28 14:56:09 2017 +0530
Committer: Venkata Sairam <ve...@gmail.com>
Committed: Wed Jun 28 14:56:09 2017 +0530

----------------------------------------------------------------------
 contrib/views/wfmanager/src/main/resources/ui/bower.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/aac9fe6e/contrib/views/wfmanager/src/main/resources/ui/bower.json
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/bower.json b/contrib/views/wfmanager/src/main/resources/ui/bower.json
index 3f9de44..9812fa6 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/bower.json
+++ b/contrib/views/wfmanager/src/main/resources/ui/bower.json
@@ -21,7 +21,7 @@
     "abdmob/x2js": "~1.2.0",
     "datatables": "~1.10.11",
     "vkBeautify": "https://github.com/vkiryukhin/vkBeautify.git",
-    "cytoscape": "2.7.20",
+    "cytoscape": "2.7.18",
     "cytoscape-dagre": "~1.3.0",
     "cytoscape-panzoom": "~2.4.0",
     "codemirror": "~5.15.0",


[26/33] ambari git commit: AMBARI-21343. Cleanup relevant Kerberos identities when a component is removed (amagyar)

Posted by rl...@apache.org.
AMBARI-21343. Cleanup relevant Kerberos identities when a component is removed (amagyar)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8b5c7db6
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8b5c7db6
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8b5c7db6

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 8b5c7db602a0e1e2dfb214ec1d51884c16219467
Parents: 9d224f7
Author: Attila Magyar <am...@hortonworks.com>
Authored: Thu Jun 29 11:05:25 2017 +0200
Committer: Attila Magyar <am...@hortonworks.com>
Committed: Thu Jun 29 11:05:25 2017 +0200

----------------------------------------------------------------------
 .../ambari/server/controller/AmbariServer.java  |   4 +
 .../controller/DeleteIdentityHandler.java       | 283 +++++++++++++++++++
 .../server/controller/KerberosHelper.java       |   3 +
 .../server/controller/KerberosHelperImpl.java   |  31 +-
 .../OrderedRequestStageContainer.java           |  45 +++
 .../utilities/KerberosIdentityCleaner.java      | 135 +++++++++
 .../AbstractPrepareKerberosServerAction.java    |  19 +-
 .../server/serveraction/kerberos/Component.java |  74 +++++
 .../kerberos/FinalizeKerberosServerAction.java  |  27 +-
 .../kerberos/KerberosServerAction.java          |  27 ++
 .../kerberos/AbstractKerberosDescriptor.java    |  15 +
 .../kerberos/KerberosComponentDescriptor.java   |  15 +
 .../state/kerberos/KerberosDescriptor.java      |   8 -
 .../kerberos/KerberosIdentityDescriptor.java    |  30 ++
 .../kerberos/KerberosServiceDescriptor.java     |   6 +
 .../utilities/KerberosIdentityCleanerTest.java  | 204 +++++++++++++
 ambari-web/app/controllers/main/service/item.js |   6 +-
 17 files changed, 894 insertions(+), 38 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
index aeba739..8988be0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
@@ -76,6 +76,7 @@ import org.apache.ambari.server.controller.internal.UserPrivilegeResourceProvide
 import org.apache.ambari.server.controller.internal.ViewPermissionResourceProvider;
 import org.apache.ambari.server.controller.metrics.ThreadPoolEnabledPropertyProvider;
 import org.apache.ambari.server.controller.utilities.KerberosChecker;
+import org.apache.ambari.server.controller.utilities.KerberosIdentityCleaner;
 import org.apache.ambari.server.metrics.system.MetricsService;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.PersistenceType;
@@ -941,6 +942,9 @@ public class AmbariServer {
     BaseService.init(injector.getInstance(RequestAuditLogger.class));
 
     RetryHelper.init(injector.getInstance(Clusters.class), configs.getOperationsRetryAttempts());
+
+    KerberosIdentityCleaner identityCleaner = injector.getInstance(KerberosIdentityCleaner.class);
+    identityCleaner.register();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/main/java/org/apache/ambari/server/controller/DeleteIdentityHandler.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/DeleteIdentityHandler.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/DeleteIdentityHandler.java
new file mode 100644
index 0000000..aa098b6
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/DeleteIdentityHandler.java
@@ -0,0 +1,283 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.controller;
+
+import static com.google.common.collect.Sets.newHashSet;
+import static org.apache.ambari.server.controller.KerberosHelperImpl.BASE_LOG_DIR;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.Role;
+import org.apache.ambari.server.RoleCommand;
+import org.apache.ambari.server.actionmanager.HostRoleStatus;
+import org.apache.ambari.server.actionmanager.Stage;
+import org.apache.ambari.server.actionmanager.StageFactory;
+import org.apache.ambari.server.agent.CommandReport;
+import org.apache.ambari.server.controller.internal.RequestResourceFilter;
+import org.apache.ambari.server.serveraction.ServerAction;
+import org.apache.ambari.server.serveraction.kerberos.AbstractPrepareKerberosServerAction;
+import org.apache.ambari.server.serveraction.kerberos.Component;
+import org.apache.ambari.server.serveraction.kerberos.DestroyPrincipalsServerAction;
+import org.apache.ambari.server.serveraction.kerberos.KDCType;
+import org.apache.ambari.server.serveraction.kerberos.KerberosOperationHandler;
+import org.apache.ambari.server.serveraction.kerberos.KerberosServerAction;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
+import org.apache.ambari.server.state.svccomphost.ServiceComponentHostServerActionEvent;
+import org.apache.ambari.server.utils.StageUtils;
+
+/**
+ * Deletes kerberos identities (principals and keytabs) of a given component.
+ */
+class DeleteIdentityHandler {
+  private final AmbariCustomCommandExecutionHelper customCommandExecutionHelper;
+  private final Integer taskTimeout;
+  private final StageFactory stageFactory;
+  private final AmbariManagementController ambariManagementController;
+
+  public DeleteIdentityHandler(AmbariCustomCommandExecutionHelper customCommandExecutionHelper, Integer taskTimeout, StageFactory stageFactory, AmbariManagementController ambariManagementController) {
+    this.customCommandExecutionHelper = customCommandExecutionHelper;
+    this.taskTimeout = taskTimeout;
+    this.stageFactory = stageFactory;
+    this.ambariManagementController = ambariManagementController;
+  }
+
+  /**
+   * Creates and adds stages to the given stage container for deleting kerberos identities.
+   * The service component that belongs to the identity doesn't need to be installed.
+   */
+  public void addDeleteIdentityStages(Cluster cluster, OrderedRequestStageContainer stageContainer, CommandParams commandParameters, boolean manageIdentities)
+    throws AmbariException
+  {
+    ServiceComponentHostServerActionEvent event = new ServiceComponentHostServerActionEvent("AMBARI_SERVER", StageUtils.getHostName(), System.currentTimeMillis());
+    String hostParamsJson = StageUtils.getGson().toJson(customCommandExecutionHelper.createDefaultHostParams(cluster, cluster.getDesiredStackVersion()));
+    stageContainer.setClusterHostInfo(StageUtils.getGson().toJson(StageUtils.getClusterHostInfo(cluster)));
+    if (manageIdentities) {
+      addPrepareDeleteIdentity(cluster, hostParamsJson, event, commandParameters, stageContainer);
+      addDestroyPrincipals(cluster, hostParamsJson, event, commandParameters, stageContainer);
+      addDeleteKeytab(cluster, newHashSet(commandParameters.component.getHostName()), hostParamsJson, commandParameters, stageContainer);
+    }
+    addFinalize(cluster, hostParamsJson, event, stageContainer, commandParameters);
+  }
+
+  private void addPrepareDeleteIdentity(Cluster cluster,
+                                        String hostParamsJson, ServiceComponentHostServerActionEvent event,
+                                        CommandParams commandParameters,
+                                        OrderedRequestStageContainer stageContainer)
+    throws AmbariException
+  {
+    Stage stage = createServerActionStage(stageContainer.getLastStageId(),
+      cluster,
+      stageContainer.getId(),
+      "Prepare delete identities",
+      "{}",
+      hostParamsJson,
+      PrepareDeleteIdentityServerAction.class,
+      event,
+      commandParameters.asMap(),
+      "Prepare delete identities",
+      taskTimeout);
+    stageContainer.addStage(stage);
+  }
+
+  private void addDestroyPrincipals(Cluster cluster,
+                                    String hostParamsJson, ServiceComponentHostServerActionEvent event,
+                                    CommandParams commandParameters,
+                                    OrderedRequestStageContainer stageContainer)
+    throws AmbariException
+  {
+    Stage stage = createServerActionStage(stageContainer.getLastStageId(),
+      cluster,
+      stageContainer.getId(),
+      "Destroy Principals",
+      "{}",
+      hostParamsJson,
+      DestroyPrincipalsServerAction.class,
+      event,
+      commandParameters.asMap(),
+      "Destroy Principals",
+      Math.max(ServerAction.DEFAULT_LONG_RUNNING_TASK_TIMEOUT_SECONDS, taskTimeout));
+    stageContainer.addStage(stage);
+  }
+
+  private void addDeleteKeytab(Cluster cluster,
+                               Set<String> hostFilter,
+                               String hostParamsJson,
+                               CommandParams commandParameters,
+                               OrderedRequestStageContainer stageContainer)
+    throws AmbariException
+  {
+    Stage stage = createNewStage(stageContainer.getLastStageId(),
+      cluster,
+      stageContainer.getId(),
+      "Delete Keytabs",
+      commandParameters.asJson(),
+      hostParamsJson);
+
+    Map<String, String> requestParams = new HashMap<>();
+    List<RequestResourceFilter> requestResourceFilters = new ArrayList<>();
+    RequestResourceFilter reqResFilter = new RequestResourceFilter("KERBEROS", "KERBEROS_CLIENT", new ArrayList<>(hostFilter));
+    requestResourceFilters.add(reqResFilter);
+
+    ActionExecutionContext actionExecContext = new ActionExecutionContext(
+      cluster.getClusterName(),
+      "REMOVE_KEYTAB",
+      requestResourceFilters,
+      requestParams);
+    customCommandExecutionHelper.addExecutionCommandsToStage(actionExecContext, stage, requestParams, null);
+    stageContainer.addStage(stage);
+  }
+
+  private void addFinalize(Cluster cluster,
+                           String hostParamsJson, ServiceComponentHostServerActionEvent event,
+                           OrderedRequestStageContainer requestStageContainer,
+                           CommandParams commandParameters)
+    throws AmbariException
+  {
+    Stage stage = createServerActionStage(requestStageContainer.getLastStageId(),
+      cluster,
+      requestStageContainer.getId(),
+      "Finalize Operations",
+      "{}",
+      hostParamsJson,
+      DeleteDataDirAction.class,
+      event,
+      commandParameters.asMap(),
+      "Finalize Operations", 300);
+    requestStageContainer.addStage(stage);
+  }
+
+
+  public static class CommandParams {
+    private final Component component;
+    private final List<String> identities;
+    private final String authName;
+    private final File dataDirectory;
+    private final String defaultRealm;
+    private final KDCType kdcType;
+
+    public CommandParams(Component component, List<String> identities, String authName, File dataDirectory, String defaultRealm, KDCType kdcType) {
+      this.component = component;
+      this.identities = identities;
+      this.authName = authName;
+      this.dataDirectory = dataDirectory;
+      this.defaultRealm = defaultRealm;
+      this.kdcType = kdcType;
+    }
+
+    public Map<String, String> asMap() {
+      Map<String, String> commandParameters = new HashMap<>();
+      commandParameters.put(KerberosServerAction.AUTHENTICATED_USER_NAME, authName);
+      commandParameters.put(KerberosServerAction.DEFAULT_REALM, defaultRealm);
+      commandParameters.put(KerberosServerAction.KDC_TYPE, kdcType.name());
+      commandParameters.put(KerberosServerAction.IDENTITY_FILTER, StageUtils.getGson().toJson(identities));
+      commandParameters.put(KerberosServerAction.COMPONENT_FILTER, StageUtils.getGson().toJson(component));
+      commandParameters.put(KerberosServerAction.DATA_DIRECTORY, dataDirectory.getAbsolutePath());
+      return commandParameters;
+    }
+
+    public String asJson() {
+      return StageUtils.getGson().toJson(asMap());
+    }
+  }
+
+  private static class PrepareDeleteIdentityServerAction extends AbstractPrepareKerberosServerAction {
+    @Override
+    public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext) throws AmbariException, InterruptedException {
+      KerberosDescriptor kerberosDescriptor = getKerberosDescriptor();
+      processServiceComponents(
+        getCluster(),
+        kerberosDescriptor,
+        Collections.singletonList(getComponentFilter()),
+        getIdentityFilter(),
+        dataDirectory(),
+        calculateConfig(kerberosDescriptor),
+        new HashMap<String, Map<String, String>>(),
+        false,
+        new HashMap<String, Set<String>>());
+      return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", actionLog.getStdOut(), actionLog.getStdErr());
+    }
+
+    protected Component getComponentFilter() {
+      return StageUtils.getGson().fromJson(getCommandParameterValue(KerberosServerAction.COMPONENT_FILTER), Component.class);
+    }
+
+    private Map<String, Map<String, String>> calculateConfig(KerberosDescriptor kerberosDescriptor) throws AmbariException {
+      return getKerberosHelper().calculateConfigurations(getCluster(), null, kerberosDescriptor.getProperties());
+    }
+
+    private String dataDirectory() {
+      return getCommandParameterValue(getCommandParameters(), DATA_DIRECTORY);
+    }
+
+    private KerberosDescriptor getKerberosDescriptor() throws AmbariException {
+      return getKerberosHelper().getKerberosDescriptor(getCluster());
+    }
+  }
+
+  private Stage createNewStage(long id, Cluster cluster, long requestId, String requestContext, String commandParams, String hostParams) {
+    Stage stage = stageFactory.createNew(requestId,
+      BASE_LOG_DIR + File.pathSeparator + requestId,
+      cluster.getClusterName(),
+      cluster.getClusterId(),
+      requestContext,
+      commandParams,
+      hostParams);
+    stage.setStageId(id);
+    return stage;
+  }
+
+  private Stage createServerActionStage(long id, Cluster cluster, long requestId,
+                                       String requestContext,
+                                       String commandParams, String hostParams,
+                                       Class<? extends ServerAction> actionClass,
+                                       ServiceComponentHostServerActionEvent event,
+                                       Map<String, String> commandParameters, String commandDetail,
+                                       Integer timeout) throws AmbariException {
+
+    Stage stage = createNewStage(id, cluster, requestId, requestContext,  commandParams, hostParams);
+    stage.addServerActionCommand(actionClass.getName(), null, Role.AMBARI_SERVER_ACTION,
+      RoleCommand.EXECUTE, cluster.getClusterName(), event, commandParameters, commandDetail,
+      ambariManagementController.findConfigurationTagsWithOverrides(cluster, null), timeout,
+      false, false);
+
+    return stage;
+  }
+
+  private static class DeleteDataDirAction extends KerberosServerAction {
+
+    @Override
+    public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext) throws AmbariException, InterruptedException {
+      deleteDataDirectory(getCommandParameterValue(DATA_DIRECTORY));
+      return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", actionLog.getStdOut(), actionLog.getStdErr());
+    }
+
+    @Override
+    protected CommandReport processIdentity(Map<String, String> identityRecord, String evaluatedPrincipal, KerberosOperationHandler operationHandler, Map<String, String> kerberosConfiguration, Map<String, Object> requestSharedDataContext) throws AmbariException {
+      return null;
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
index ca2dda5..cc0c048 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
@@ -27,6 +27,7 @@ import java.util.Set;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.controller.internal.RequestStageContainer;
 import org.apache.ambari.server.security.credential.PrincipalKeyCredential;
+import org.apache.ambari.server.serveraction.kerberos.Component;
 import org.apache.ambari.server.serveraction.kerberos.KerberosAdminAuthenticationException;
 import org.apache.ambari.server.serveraction.kerberos.KerberosIdentityDataFileWriter;
 import org.apache.ambari.server.serveraction.kerberos.KerberosInvalidConfigurationException;
@@ -232,6 +233,8 @@ public interface KerberosHelper {
                                          RequestStageContainer requestStageContainer, Boolean manageIdentities)
       throws AmbariException, KerberosOperationException;
 
+  void deleteIdentity(Cluster cluster, Component component, List<String> identities) throws AmbariException, KerberosOperationException;
+
   /**
    * Updates the relevant configurations for the components specified in the service filter.
    * <p/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
index d57fcd2..b30f8f6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
@@ -64,6 +64,7 @@ import org.apache.ambari.server.security.credential.PrincipalKeyCredential;
 import org.apache.ambari.server.security.encryption.CredentialStoreService;
 import org.apache.ambari.server.serveraction.ServerAction;
 import org.apache.ambari.server.serveraction.kerberos.CleanupServerAction;
+import org.apache.ambari.server.serveraction.kerberos.Component;
 import org.apache.ambari.server.serveraction.kerberos.ConfigureAmbariIdentitiesServerAction;
 import org.apache.ambari.server.serveraction.kerberos.CreateKeytabFilesServerAction;
 import org.apache.ambari.server.serveraction.kerberos.CreatePrincipalsServerAction;
@@ -130,7 +131,7 @@ import com.google.inject.persist.Transactional;
 @Singleton
 public class KerberosHelperImpl implements KerberosHelper {
 
-  private static final String BASE_LOG_DIR = "/tmp/ambari";
+  public static final String BASE_LOG_DIR = "/tmp/ambari";
 
   private static final Logger LOG = LoggerFactory.getLogger(KerberosHelperImpl.class);
 
@@ -296,6 +297,34 @@ public class KerberosHelperImpl implements KerberosHelper {
         requestStageContainer, new DeletePrincipalsAndKeytabsHandler());
   }
 
+  /**
+   * Deletes the kerberos identities of the given component, even if the component is already deleted.
+   */
+  @Override
+  public void deleteIdentity(Cluster cluster, Component component, List<String> identities) throws AmbariException, KerberosOperationException {
+    if (identities.isEmpty()) {
+      return;
+    }
+    KerberosDetails kerberosDetails = getKerberosDetails(cluster, null);
+    validateKDCCredentials(kerberosDetails, cluster);
+    File dataDirectory = createTemporaryDirectory();
+    RoleCommandOrder roleCommandOrder = ambariManagementController.getRoleCommandOrder(cluster);
+    DeleteIdentityHandler handler = new DeleteIdentityHandler(customCommandExecutionHelper, configuration.getDefaultServerTaskTimeout(), stageFactory, ambariManagementController);
+    DeleteIdentityHandler.CommandParams commandParameters = new DeleteIdentityHandler.CommandParams(
+      component,
+      identities,
+      ambariManagementController.getAuthName(),
+      dataDirectory,
+      kerberosDetails.getDefaultRealm(),
+      kerberosDetails.getKdcType());
+    OrderedRequestStageContainer stageContainer = new OrderedRequestStageContainer(
+      roleGraphFactory,
+      roleCommandOrder,
+      new RequestStageContainer(actionManager.getNextRequestId(), null, requestFactory, actionManager));
+    handler.addDeleteIdentityStages(cluster, stageContainer, commandParameters, kerberosDetails.manageIdentities());
+    stageContainer.getRequestStageContainer().persist();
+  }
+
   @Override
   public void configureServices(Cluster cluster, Map<String, Collection<String>> serviceFilter)
       throws AmbariException, KerberosInvalidConfigurationException {

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/main/java/org/apache/ambari/server/controller/OrderedRequestStageContainer.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/OrderedRequestStageContainer.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/OrderedRequestStageContainer.java
new file mode 100644
index 0000000..6d8b5a3
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/OrderedRequestStageContainer.java
@@ -0,0 +1,45 @@
+package org.apache.ambari.server.controller;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.actionmanager.Stage;
+import org.apache.ambari.server.controller.internal.RequestStageContainer;
+import org.apache.ambari.server.metadata.RoleCommandOrder;
+import org.apache.ambari.server.stageplanner.RoleGraph;
+import org.apache.ambari.server.stageplanner.RoleGraphFactory;
+
+/**
+ * A wrapper around RequestStageContainer that takes the role command order into consideration when adding stages.
+ */
+public class OrderedRequestStageContainer {
+  private final RoleGraphFactory roleGraphFactory;
+  private final RoleCommandOrder roleCommandOrder;
+  private final RequestStageContainer requestStageContainer;
+
+  public OrderedRequestStageContainer(RoleGraphFactory roleGraphFactory, RoleCommandOrder roleCommandOrder, RequestStageContainer requestStageContainer) {
+    this.roleGraphFactory = roleGraphFactory;
+    this.roleCommandOrder = roleCommandOrder;
+    this.requestStageContainer = requestStageContainer;
+  }
+
+  public void addStage(Stage stage) throws AmbariException {
+    RoleGraph roleGraph = roleGraphFactory.createNew(roleCommandOrder);
+    roleGraph.build(stage);
+    requestStageContainer.addStages(roleGraph.getStages());
+  }
+
+  public long getLastStageId() {
+    return requestStageContainer.getLastStageId();
+  }
+
+  public long getId() {
+    return requestStageContainer.getId();
+  }
+
+  public RequestStageContainer getRequestStageContainer() {
+    return requestStageContainer;
+  }
+
+  public void setClusterHostInfo(String clusterHostInfo) {
+    this.requestStageContainer.setClusterHostInfo(clusterHostInfo);
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleaner.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleaner.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleaner.java
new file mode 100644
index 0000000..0a8462f
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleaner.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.controller.utilities;
+
+import static org.apache.ambari.server.state.kerberos.AbstractKerberosDescriptor.nullToEmpty;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.KerberosHelper;
+import org.apache.ambari.server.events.ServiceComponentUninstalledEvent;
+import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
+import org.apache.ambari.server.serveraction.kerberos.Component;
+import org.apache.ambari.server.serveraction.kerberos.KerberosMissingAdminCredentialsException;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.SecurityType;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.kerberos.KerberosComponentDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosIdentityDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.eventbus.Subscribe;
+import com.google.inject.Inject;
+import com.google.inject.Singleton;
+
+@Singleton
+public class KerberosIdentityCleaner {
+  private final static Logger LOG = LoggerFactory.getLogger(KerberosIdentityCleaner.class);
+  private final AmbariEventPublisher eventPublisher;
+  private final KerberosHelper kerberosHelper;
+  private final Clusters clusters;
+
+  @Inject
+  public KerberosIdentityCleaner(AmbariEventPublisher eventPublisher, KerberosHelper kerberosHelper, Clusters clusters) {
+    this.eventPublisher = eventPublisher;
+    this.kerberosHelper = kerberosHelper;
+    this.clusters = clusters;
+  }
+
+  public void register() {
+    this.eventPublisher.register(this);
+  }
+
+  /**
+   * Removes kerberos identities (principals and keytabs) after a component was uninstalled.
+   * Keeps the identity if either the principal or the keytab is used by an other service
+   */
+  @Subscribe
+  public void componentRemoved(ServiceComponentUninstalledEvent event) throws KerberosMissingAdminCredentialsException {
+    try {
+      Cluster cluster = clusters.getCluster(event.getClusterId());
+      if (cluster.getSecurityType() != SecurityType.KERBEROS) {
+        return;
+      }
+      KerberosComponentDescriptor descriptor = componentDescriptor(cluster, event.getServiceName(), event.getComponentName());
+      if (descriptor == null) {
+        LOG.info("No kerberos descriptor for {}", event);
+        return;
+      }
+      List<String> identitiesToRemove = identityNames(skipSharedIdentities(descriptor.getIdentitiesSkipReferences(), cluster, event));
+      LOG.info("Deleting identities {} after an event {}",  identitiesToRemove, event);
+      kerberosHelper.deleteIdentity(cluster, new Component(event.getHostName(), event.getServiceName(), event.getComponentName()), identitiesToRemove);
+    } catch (Exception e) {
+      LOG.error("Error while deleting kerberos identity after an event: " + event, e);
+    }
+  }
+
+  private KerberosComponentDescriptor componentDescriptor(Cluster cluster, String serviceName, String componentName) throws AmbariException {
+    KerberosServiceDescriptor serviceDescriptor = kerberosHelper.getKerberosDescriptor(cluster).getService(serviceName);
+    return serviceDescriptor == null ? null : serviceDescriptor.getComponent(componentName);
+  }
+
+  private List<String> identityNames(List<KerberosIdentityDescriptor> identities) {
+    List<String> result = new ArrayList<>();
+    for (KerberosIdentityDescriptor each : identities) { result.add(each.getName()); }
+    return result;
+  }
+
+  private List<KerberosIdentityDescriptor> skipSharedIdentities(List<KerberosIdentityDescriptor> candidates, Cluster cluster, ServiceComponentUninstalledEvent event) throws AmbariException {
+    List<KerberosIdentityDescriptor> activeIdentities = activeIdentities(cluster, kerberosHelper.getKerberosDescriptor(cluster), event);
+    List<KerberosIdentityDescriptor> result = new ArrayList<>();
+    for (KerberosIdentityDescriptor candidate : candidates) {
+      if (!candidate.isShared(activeIdentities)) {
+        result.add(candidate);
+      } else {
+        LOG.debug("Skip removing shared identity: {}", candidate.getName());
+      }
+    }
+    return result;
+  }
+
+  private List<KerberosIdentityDescriptor> activeIdentities(Cluster cluster, KerberosDescriptor root, ServiceComponentUninstalledEvent event) {
+    List<KerberosIdentityDescriptor> result = new ArrayList<>();
+    result.addAll(nullToEmpty(root.getIdentities()));
+    for (Map.Entry<String, Service> serviceEntry : cluster.getServices().entrySet()) {
+      KerberosServiceDescriptor serviceDescriptor = root.getService(serviceEntry.getKey());
+      if (serviceDescriptor == null) {
+        continue;
+      }
+      result.addAll(nullToEmpty(serviceDescriptor.getIdentities()));
+      for (String componentName : serviceEntry.getValue().getServiceComponents().keySet()) {
+        if (!sameComponent(event, componentName, serviceEntry.getKey())) {
+          result.addAll(serviceDescriptor.getComponentIdentities(componentName));
+        }
+      }
+    }
+    return result;
+  }
+
+  private boolean sameComponent(ServiceComponentUninstalledEvent event, String componentName, String serviceName) {
+    return event.getServiceName().equals(serviceName) && event.getComponentName().equals(componentName);
+  }
+}
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/AbstractPrepareKerberosServerAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/AbstractPrepareKerberosServerAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/AbstractPrepareKerberosServerAction.java
index 7aac346..dd2b223 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/AbstractPrepareKerberosServerAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/AbstractPrepareKerberosServerAction.java
@@ -21,6 +21,7 @@ package org.apache.ambari.server.serveraction.kerberos;
 import java.io.File;
 import java.io.IOException;
 import java.lang.reflect.Type;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -65,7 +66,7 @@ public abstract class AbstractPrepareKerberosServerAction extends KerberosServer
     throw new UnsupportedOperationException();
   }
 
-  KerberosHelper getKerberosHelper() {
+  protected KerberosHelper getKerberosHelper() {
     return kerberosHelper;
   }
 
@@ -76,6 +77,20 @@ public abstract class AbstractPrepareKerberosServerAction extends KerberosServer
                                     Map<String, Map<String, String>> kerberosConfigurations,
                                     boolean includeAmbariIdentity,
                                     Map<String, Set<String>> propertiesToBeIgnored) throws AmbariException {
+    List<Component> components = new ArrayList<>();
+    for (ServiceComponentHost each : schToProcess) {
+      components.add(Component.fromServiceComponentHost(each));
+    }
+    processServiceComponents(cluster, kerberosDescriptor, components, identityFilter, dataDirectory, currentConfigurations, kerberosConfigurations, includeAmbariIdentity, propertiesToBeIgnored);
+  }
+
+  protected void processServiceComponents(Cluster cluster, KerberosDescriptor kerberosDescriptor,
+                                          List<Component> schToProcess,
+                                          Collection<String> identityFilter, String dataDirectory,
+                                          Map<String, Map<String, String>> currentConfigurations,
+                                          Map<String, Map<String, String>> kerberosConfigurations,
+                                          boolean includeAmbariIdentity,
+                                          Map<String, Set<String>> propertiesToBeIgnored) throws AmbariException {
 
     actionLog.writeStdOut("Processing Kerberos identities and configurations");
 
@@ -113,7 +128,7 @@ public abstract class AbstractPrepareKerberosServerAction extends KerberosServer
         // Iterate over the components installed on the current host to get the service and
         // component-level Kerberos descriptors in order to determine which principals,
         // keytab files, and configurations need to be created or updated.
-        for (ServiceComponentHost sch : schToProcess) {
+        for (Component sch : schToProcess) {
           String hostName = sch.getHostName();
 
           String serviceName = sch.getServiceName();

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/Component.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/Component.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/Component.java
new file mode 100644
index 0000000..4f1ee52
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/Component.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.serveraction.kerberos;
+
+import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.commons.lang.builder.EqualsBuilder;
+import org.apache.commons.lang.builder.HashCodeBuilder;
+
+public class Component {
+  private final String hostName;
+  private final String serviceName;
+  private final String serviceComponentName;
+
+  public static Component fromServiceComponentHost(ServiceComponentHost serviceComponentHost) {
+    return new Component(
+      serviceComponentHost.getHostName(),
+      serviceComponentHost.getServiceName(),
+      serviceComponentHost.getServiceComponentName());
+  }
+
+  public Component(String hostName, String serviceName, String serviceComponentName) {
+    this.hostName = hostName;
+    this.serviceName = serviceName;
+    this.serviceComponentName = serviceComponentName;
+  }
+
+  public String getHostName() {
+    return hostName;
+  }
+
+  public String getServiceName() {
+    return serviceName;
+  }
+
+  public String getServiceComponentName() {
+    return serviceComponentName;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    Component component = (Component) o;
+    return new EqualsBuilder()
+      .append(hostName, component.hostName)
+      .append(serviceName, component.serviceName)
+      .append(serviceComponentName, component.serviceComponentName)
+      .isEquals();
+  }
+
+  @Override
+  public int hashCode() {
+    return new HashCodeBuilder(17, 37)
+      .append(hostName)
+      .append(serviceName)
+      .append(serviceComponentName)
+      .toHashCode();
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/FinalizeKerberosServerAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/FinalizeKerberosServerAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/FinalizeKerberosServerAction.java
index 2742390..10ad48b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/FinalizeKerberosServerAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/FinalizeKerberosServerAction.java
@@ -18,8 +18,6 @@
 
 package org.apache.ambari.server.serveraction.kerberos;
 
-import java.io.File;
-import java.io.IOException;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
@@ -36,7 +34,6 @@ import org.apache.ambari.server.state.SecurityState;
 import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.utils.ShellCommandUtil;
 import org.apache.ambari.server.utils.StageUtils;
-import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -208,29 +205,9 @@ public class FinalizeKerberosServerAction extends KerberosServerAction {
       processIdentities(requestSharedDataContext);
       requestSharedDataContext.remove(this.getClass().getName() + "_visited");
     }
-
-    // Make sure this is a relevant directory. We don't want to accidentally allow _ANY_ directory
-    // to be deleted.
-    if ((dataDirectoryPath != null) && dataDirectoryPath.contains("/" + DATA_DIRECTORY_PREFIX)) {
-      File dataDirectory = new File(dataDirectoryPath);
-      File dataDirectoryParent = dataDirectory.getParentFile();
-
-      // Make sure this directory has a parent and it is writeable, else we wont be able to
-      // delete the directory
-      if ((dataDirectoryParent != null) && dataDirectory.isDirectory() &&
-          dataDirectoryParent.isDirectory() && dataDirectoryParent.canWrite()) {
-        try {
-          FileUtils.deleteDirectory(dataDirectory);
-        } catch (IOException e) {
-          // We should log this exception, but don't let it fail the process since if we got to this
-          // KerberosServerAction it is expected that the the overall process was a success.
-          String message = String.format("The data directory (%s) was not deleted due to an error condition - {%s}",
-              dataDirectory.getAbsolutePath(), e.getMessage());
-          LOG.warn(message, e);
-        }
-      }
-    }
+    deleteDataDirectory(dataDirectoryPath);
 
     return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", actionLog.getStdOut(), actionLog.getStdErr());
   }
+
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/KerberosServerAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/KerberosServerAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/KerberosServerAction.java
index d404133..2e331bb 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/KerberosServerAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/KerberosServerAction.java
@@ -35,6 +35,7 @@ import org.apache.ambari.server.serveraction.AbstractServerAction;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.utils.StageUtils;
+import org.apache.commons.io.FileUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -82,6 +83,8 @@ public abstract class KerberosServerAction extends AbstractServerAction {
    */
   public static final String IDENTITY_FILTER = "identity_filter";
 
+  public static final String COMPONENT_FILTER = "component_filter";
+
   /**
    * A (command parameter) property name used to hold the relevant KDC type value.  See
    * {@link org.apache.ambari.server.serveraction.kerberos.KDCType} for valid values
@@ -536,4 +539,28 @@ public abstract class KerberosServerAction extends AbstractServerAction {
 
     return commandReport;
   }
+
+  protected void deleteDataDirectory(String dataDirectoryPath) {
+    // Make sure this is a relevant directory. We don't want to accidentally allow _ANY_ directory
+    // to be deleted.
+    if ((dataDirectoryPath != null) && dataDirectoryPath.contains("/" + DATA_DIRECTORY_PREFIX)) {
+      File dataDirectory = new File(dataDirectoryPath);
+      File dataDirectoryParent = dataDirectory.getParentFile();
+
+      // Make sure this directory has a parent and it is writeable, else we wont be able to
+      // delete the directory
+      if ((dataDirectoryParent != null) && dataDirectory.isDirectory() &&
+          dataDirectoryParent.isDirectory() && dataDirectoryParent.canWrite()) {
+        try {
+          FileUtils.deleteDirectory(dataDirectory);
+        } catch (IOException e) {
+          // We should log this exception, but don't let it fail the process since if we got to this
+          // KerberosServerAction it is expected that the the overall process was a success.
+          String message = String.format("The data directory (%s) was not deleted due to an error condition - {%s}",
+              dataDirectory.getAbsolutePath(), e.getMessage());
+          LOG.warn(message, e);
+        }
+      }
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptor.java b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptor.java
index 397f384..38100ac 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptor.java
@@ -18,6 +18,9 @@
 
 package org.apache.ambari.server.state.kerberos;
 
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 
@@ -181,6 +184,18 @@ public abstract class AbstractKerberosDescriptor {
     return root;
   }
 
+  public static <T> Collection<T> nullToEmpty(Collection<T> collection) {
+    return collection == null ? Collections.<T>emptyList() : collection;
+  }
+
+  public static <T> List<T> nullToEmpty(List<T> list) {
+    return list == null ? Collections.<T>emptyList() : list;
+  }
+
+  public static <K,V> Map<K,V> nullToEmpty(Map<K,V> collection) {
+    return collection == null ? Collections.<K,V>emptyMap() : collection;
+  }
+
   @Override
   public int hashCode() {
     return 37 *

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosComponentDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosComponentDescriptor.java b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosComponentDescriptor.java
index 768a17e..41d1f65 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosComponentDescriptor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosComponentDescriptor.java
@@ -17,7 +17,9 @@
  */
 package org.apache.ambari.server.state.kerberos;
 
+import java.util.ArrayList;
 import java.util.Collection;
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -111,6 +113,19 @@ public class KerberosComponentDescriptor extends AbstractKerberosDescriptorConta
     return null;
   }
 
+  /**
+   * @return identities which are not references to other identities
+   */
+  public List<KerberosIdentityDescriptor> getIdentitiesSkipReferences() {
+    List<KerberosIdentityDescriptor> result = new ArrayList<>();
+    for (KerberosIdentityDescriptor each : nullToEmpty(getIdentities())) {
+      if (!each.getReferencedServiceName().isPresent() && each.getName() != null && !each.getName().startsWith("/")) {
+        result.add(each);
+      }
+    }
+    return result;
+  }
+
   @Override
   public int hashCode() {
     return 35 * super.hashCode();

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosDescriptor.java b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosDescriptor.java
index f9dfa4a..eba1b3a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosDescriptor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosDescriptor.java
@@ -461,12 +461,4 @@ public class KerberosDescriptor extends AbstractKerberosDescriptorContainer {
       }
     }
   }
-
-  private static <T> Collection<T> nullToEmpty(Collection<T> collection) {
-    return collection == null ? Collections.<T>emptyList() : collection;
-  }
-
-  private static <K,V> Map<K,V> nullToEmpty(Map<K,V> collection) {
-    return collection == null ? Collections.<K,V>emptyMap() : collection;
-  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java
index e180f7a..2023793 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java
@@ -17,8 +17,10 @@
  */
 package org.apache.ambari.server.state.kerberos;
 
+import java.util.List;
 import java.util.Map;
 
+import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.collections.Predicate;
 import org.apache.ambari.server.collections.PredicateUtils;
 
@@ -369,6 +371,34 @@ public class KerberosIdentityDescriptor extends AbstractKerberosDescriptor {
     }
   }
 
+  /**
+   * @return true if this identity either has the same principal or keytab as any of the given identities.
+   */
+  public boolean isShared(List<KerberosIdentityDescriptor> identities) throws AmbariException {
+    for (KerberosIdentityDescriptor each : identities) {
+      if (hasSamePrincipal(each) || hasSameKeytab(each)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  private boolean hasSameKeytab(KerberosIdentityDescriptor that) {
+    try {
+      return this.getKeytabDescriptor().getFile().equals(that.getKeytabDescriptor().getFile());
+    } catch (NullPointerException e) {
+      return false;
+    }
+  }
+
+  private boolean hasSamePrincipal(KerberosIdentityDescriptor that) {
+    try {
+      return this.getPrincipalDescriptor().getValue().equals(that.getPrincipalDescriptor().getValue());
+    } catch (NullPointerException e) {
+      return false;
+    }
+  }
+
   @Override
   public int hashCode() {
     return super.hashCode() +

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosServiceDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosServiceDescriptor.java b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosServiceDescriptor.java
index 8507bfa..0777327 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosServiceDescriptor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosServiceDescriptor.java
@@ -272,6 +272,12 @@ public class KerberosServiceDescriptor extends AbstractKerberosDescriptorContain
     return map;
   }
 
+  public List<KerberosIdentityDescriptor> getComponentIdentities(String componentName) {
+    return getComponent(componentName) != null
+      ? nullToEmpty(getComponent(componentName).getIdentities())
+      : Collections.<KerberosIdentityDescriptor>emptyList();
+  }
+
   @Override
   public int hashCode() {
     return super.hashCode() +

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleanerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleanerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleanerTest.java
new file mode 100644
index 0000000..d22c92e
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleanerTest.java
@@ -0,0 +1,204 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.controller.utilities;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
+import static org.easymock.EasyMock.reset;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.ambari.server.controller.KerberosHelper;
+import org.apache.ambari.server.events.ServiceComponentUninstalledEvent;
+import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
+import org.apache.ambari.server.serveraction.kerberos.Component;
+import org.apache.ambari.server.serveraction.kerberos.KerberosMissingAdminCredentialsException;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.SecurityType;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponent;
+import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
+import org.easymock.EasyMockRule;
+import org.easymock.EasyMockSupport;
+import org.easymock.Mock;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+
+public class KerberosIdentityCleanerTest extends EasyMockSupport {
+  @Rule public EasyMockRule mocks = new EasyMockRule(this);
+  private static final String HOST = "c6401";
+  private static final String OOZIE = "OOZIE";
+  private static final String OOZIE_SERVER = "OOZIE_SERVER";
+  private static final String OOZIE_2 = "OOZIE2";
+  private static final String OOZIE_SERVER_2 = "OOZIE_SERVER2";
+  private static final String YARN_2 = "YARN2";
+  private static final String RESOURCE_MANAGER_2 = "RESOURCE_MANAGER2";
+  private static final String YARN = "YARN";
+  private static final String RESOURCE_MANAGER = "RESOURCE_MANAGER";
+  private static final long CLUSTER_ID = 1;
+  @Mock private KerberosHelper kerberosHelper;
+  @Mock private Clusters clusters;
+  @Mock private Cluster cluster;
+  private Map<String, Service> installedServices = new HashMap<>();
+  private KerberosDescriptorFactory kerberosDescriptorFactory = new KerberosDescriptorFactory();
+  private KerberosIdentityCleaner kerberosIdentityCleaner;
+  private KerberosDescriptor kerberosDescriptor;
+
+  @Test
+  public void removesAllKerberosIdentitesOfComponentAfterComponentWasUninstalled() throws Exception {
+    installComponent(OOZIE, OOZIE_SERVER);
+    kerberosHelper.deleteIdentity(cluster, new Component(HOST, OOZIE, OOZIE_SERVER), newArrayList("oozie_server1", "oozie_server2"));
+    expectLastCall().once();
+    replayAll();
+    uninstallComponent(OOZIE, OOZIE_SERVER, HOST);
+    verifyAll();
+  }
+
+  @Test
+  public void skipsRemovingIdentityWhenServiceDoesNotExist() throws Exception {
+    replayAll();
+    uninstallComponent("NO_SUCH_SERVICE", OOZIE_SERVER, HOST);
+    verifyAll();
+  }
+
+  @Test
+  public void skipsRemovingIdentityThatIsSharedByPrincipalName() throws Exception {
+    installComponent(OOZIE, OOZIE_SERVER);
+    installComponent(OOZIE_2, OOZIE_SERVER_2);
+    kerberosHelper.deleteIdentity(cluster, new Component(HOST, OOZIE, OOZIE_SERVER), newArrayList("oozie_server1"));
+    expectLastCall().once();
+    replayAll();
+    uninstallComponent(OOZIE, OOZIE_SERVER, HOST);
+    verifyAll();
+  }
+
+  @Test
+  public void skipsRemovingIdentityThatIsSharedByKeyTabFilePath() throws Exception {
+    installComponent(YARN, RESOURCE_MANAGER);
+    installComponent(YARN_2, RESOURCE_MANAGER_2);
+    kerberosHelper.deleteIdentity(cluster, new Component(HOST, YARN, RESOURCE_MANAGER), newArrayList("rm_unique"));
+    expectLastCall().once();
+    replayAll();
+    uninstallComponent(YARN, RESOURCE_MANAGER, HOST);
+    verifyAll();
+  }
+
+  @Test
+  public void skipsRemovingIdentityWhenClusterIsNotKerberized() throws Exception {
+    reset(cluster);
+    expect(cluster.getSecurityType()).andReturn(SecurityType.NONE).anyTimes();
+    replayAll();
+    uninstallComponent(OOZIE, OOZIE_SERVER, HOST);
+    verifyAll();
+  }
+
+  private void installComponent(String serviceName, final String componentName) {
+    Service service = createMock(serviceName + "_" + componentName, Service.class);
+    installedServices.put(serviceName, service);
+    expect(service.getServiceComponents()).andReturn(new HashMap<String, ServiceComponent>() {{
+      put(componentName, null);
+    }}).anyTimes();
+  }
+
+  private void uninstallComponent(String service, String component, String host) throws KerberosMissingAdminCredentialsException {
+    kerberosIdentityCleaner.componentRemoved(new ServiceComponentUninstalledEvent(CLUSTER_ID, "any", "any", service, component, host, false));
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    kerberosIdentityCleaner = new KerberosIdentityCleaner(new AmbariEventPublisher(), kerberosHelper, clusters);
+    kerberosDescriptor = kerberosDescriptorFactory.createInstance("{" +
+      "  'services': [" +
+      "    {" +
+      "      'name': 'OOZIE'," +
+      "      'components': [" +
+      "        {" +
+      "          'name': 'OOZIE_SERVER'," +
+      "          'identities': [" +
+      "            {" +
+      "              'name': '/HDFS/NAMENODE/hdfs'" +
+      "            }," +
+      "            {" +
+      "              'name': 'oozie_server1'" +
+      "            }," +"" +
+      "            {" +
+      "              'name': 'oozie_server2'," +
+      "              'principal': { 'value': 'oozie/_HOST@EXAMPLE.COM' }" +
+      "            }" +
+      "          ]" +
+      "        }" +
+      "      ]" +
+      "    }," +
+      "    {" +
+      "      'name': 'OOZIE2'," +
+      "      'components': [" +
+      "        {" +
+      "          'name': 'OOZIE_SERVER2'," +
+      "          'identities': [" +
+      "            {" +
+      "              'name': 'oozie_server3'," +
+      "              'principal': { 'value': 'oozie/_HOST@EXAMPLE.COM' }" +
+      "            }" +"" +
+      "          ]" +
+      "        }" +
+      "      ]" +
+      "    }," +
+      "    {" +
+      "      'name': 'YARN'," +
+      "      'components': [" +
+      "        {" +
+      "          'name': 'RESOURCE_MANAGER'," +
+      "          'identities': [" +
+      "            {" +
+      "              'name': 'rm_unique'" +
+      "            }," +
+      "            {" +
+      "              'name': 'rm1-shared'," +
+      "              'keytab' : { 'file' : 'shared' }" +
+      "            }" +
+      "          ]" +
+      "        }" +
+      "      ]" +
+      "    }," +
+      "    {" +
+      "      'name': 'YARN2'," +
+      "      'components': [" +
+      "        {" +
+      "          'name': 'RESOURCE_MANAGER2'," +
+      "          'identities': [" +
+      "            {" +
+      "              'name': 'rm2-shared'," +
+      "              'keytab' : { 'file' : 'shared' }" +
+      "            }" +
+      "          ]" +
+      "        }" +
+      "      ]" +
+      "    }" +
+      "  ]" +
+      "}");
+    expect(clusters.getCluster(CLUSTER_ID)).andReturn(cluster).anyTimes();
+    expect(cluster.getSecurityType()).andReturn(SecurityType.KERBEROS).anyTimes();
+    expect(kerberosHelper.getKerberosDescriptor(cluster)).andReturn(kerberosDescriptor).anyTimes();
+    expect(cluster.getServices()).andReturn(installedServices).anyTimes();
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-web/app/controllers/main/service/item.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/item.js b/ambari-web/app/controllers/main/service/item.js
index 37713dc..197eb8e 100644
--- a/ambari-web/app/controllers/main/service/item.js
+++ b/ambari-web/app/controllers/main/service/item.js
@@ -1388,8 +1388,10 @@ App.MainServiceItemController = Em.Controller.extend(App.SupportClientConfigsDow
               this._super();
             }
           });
-        self.set('deleteServiceProgressPopup', progressPopup);
-        self.deleteServiceCall(serviceNames);
+        App.get('router.mainAdminKerberosController').getKDCSessionState(function() {
+          self.set('deleteServiceProgressPopup', progressPopup);
+          self.deleteServiceCall(serviceNames);
+        });
         this._super();
       },
 


[21/33] ambari git commit: AMBARI-21360: Ability to delete a view instance from view instance list (sangeetar)

Posted by rl...@apache.org.
AMBARI-21360: Ability to delete a view instance from view instance list (sangeetar)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a3681c01
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a3681c01
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a3681c01

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: a3681c0199f31511170770d454697206cbeeeda3
Parents: 3446283
Author: Sangeeta Ravindran <sa...@apache.org>
Authored: Wed Jun 28 09:29:57 2017 -0700
Committer: Sangeeta Ravindran <sa...@apache.org>
Committed: Wed Jun 28 09:29:57 2017 -0700

----------------------------------------------------------------------
 AMBARI-21360.patch                              | 45 ++++++++++++++++++++
 .../controllers/ambariViews/ViewsListCtrl.js    | 20 +++++++++
 .../app/views/ambariViews/listTable.html        |  3 ++
 3 files changed, 68 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a3681c01/AMBARI-21360.patch
----------------------------------------------------------------------
diff --git a/AMBARI-21360.patch b/AMBARI-21360.patch
new file mode 100644
index 0000000..c26f3a0
--- /dev/null
+++ b/AMBARI-21360.patch
@@ -0,0 +1,45 @@
+diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsListCtrl.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsListCtrl.js
+index c41e5d4..4e7bae3 100644
+--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsListCtrl.js
++++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsListCtrl.js
+@@ -132,6 +132,26 @@ angular.module('ambariAdminConsole')
+     }
+   };
+ 
++  $scope.deleteInstance = function(instance) {
++      ConfirmationModal.show(
++        $t('common.delete', {
++          term: $t('views.viewInstance')
++        }),
++        $t('common.deleteConfirmation', {
++          instanceType: $t('views.viewInstance'),
++          instanceName: instance.ViewInstanceInfo.label
++        })
++      ).then(function() {
++        View.deleteInstance(instance.ViewInstanceInfo.view_name, instance.ViewInstanceInfo.version, instance.ViewInstanceInfo.instance_name)
++          .then(function() {
++            loadViews();
++          })
++          .catch(function(data) {
++            Alert.error($t('views.alerts.cannotDeleteInstance'), data.data.message);
++          });
++      });
++    };
++
+   $scope.reloadViews = function () {
+     loadViews();
+   };
+diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/listTable.html b/ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/listTable.html
+index 59c322f..91b9a93 100644
+--- a/ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/listTable.html
++++ b/ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/listTable.html
+@@ -81,6 +81,9 @@
+                     <td class="col-sm-1">
+                         <a class="instance-link ng-scope ng-binding" href="#/views/{{view.view_name}}/versions/{{instance.ViewInstanceInfo.version}}/instances/{{instance.ViewInstanceInfo.instance_name}}/clone"><i class="fa fa-copy"></i></a>
+                     </td>
++                    <td class="col-sm-1">
++                        <a class="instance-link ng-scope ng-binding" href ng-click="deleteInstance(instance)"><i class="fa fa-trash-o"></i></a>
++                    </td>
+                 </tr>
+                 </tbody>
+                 <tfoot>

http://git-wip-us.apache.org/repos/asf/ambari/blob/a3681c01/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsListCtrl.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsListCtrl.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsListCtrl.js
index c41e5d4..4e7bae3 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsListCtrl.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsListCtrl.js
@@ -132,6 +132,26 @@ angular.module('ambariAdminConsole')
     }
   };
 
+  $scope.deleteInstance = function(instance) {
+      ConfirmationModal.show(
+        $t('common.delete', {
+          term: $t('views.viewInstance')
+        }),
+        $t('common.deleteConfirmation', {
+          instanceType: $t('views.viewInstance'),
+          instanceName: instance.ViewInstanceInfo.label
+        })
+      ).then(function() {
+        View.deleteInstance(instance.ViewInstanceInfo.view_name, instance.ViewInstanceInfo.version, instance.ViewInstanceInfo.instance_name)
+          .then(function() {
+            loadViews();
+          })
+          .catch(function(data) {
+            Alert.error($t('views.alerts.cannotDeleteInstance'), data.data.message);
+          });
+      });
+    };
+
   $scope.reloadViews = function () {
     loadViews();
   };

http://git-wip-us.apache.org/repos/asf/ambari/blob/a3681c01/ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/listTable.html
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/listTable.html b/ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/listTable.html
index 59c322f..91b9a93 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/listTable.html
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/listTable.html
@@ -81,6 +81,9 @@
                     <td class="col-sm-1">
                         <a class="instance-link ng-scope ng-binding" href="#/views/{{view.view_name}}/versions/{{instance.ViewInstanceInfo.version}}/instances/{{instance.ViewInstanceInfo.instance_name}}/clone"><i class="fa fa-copy"></i></a>
                     </td>
+                    <td class="col-sm-1">
+                        <a class="instance-link ng-scope ng-binding" href ng-click="deleteInstance(instance)"><i class="fa fa-trash-o"></i></a>
+                    </td>
                 </tr>
                 </tbody>
                 <tfoot>


[29/33] ambari git commit: AMBARI-21370: Support VIPs instead of Host Names (jluniya)

Posted by rl...@apache.org.
AMBARI-21370: Support VIPs instead of Host Names (jluniya)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4d7cc7f3
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4d7cc7f3
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4d7cc7f3

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 4d7cc7f392a6c4b52d39456504ad490d74fd019a
Parents: 4cd3150
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Thu Jun 29 07:17:24 2017 -0700
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Thu Jun 29 07:17:24 2017 -0700

----------------------------------------------------------------------
 .../ambari_agent/AlertSchedulerHandler.py       |  10 +-
 .../python/ambari_agent/alerts/base_alert.py    |   8 +-
 .../python/ambari_agent/alerts/port_alert.py    | 107 +++++++++++--------
 .../ambari_agent/TestAlertSchedulerHandler.py   |  17 +--
 .../server/agent/AlertDefinitionCommand.java    |   7 +-
 .../ambari/server/agent/HeartBeatHandler.java   |   4 +-
 .../internal/AbstractProviderModule.java        |  47 ++++++--
 .../server/controller/jmx/JMXHostProvider.java  |  13 +++
 .../controller/jmx/JMXPropertyProvider.java     |  25 +++++
 .../org/apache/ambari/server/state/Cluster.java |   8 ++
 .../server/state/alert/AlertDefinitionHash.java |  14 +--
 .../server/state/cluster/ClusterImpl.java       |  18 ++++
 .../2.1.0.2.0/package/scripts/hdfs_namenode.py  |   4 +-
 .../package/scripts/namenode_upgrade.py         |   2 +-
 .../2.1.0.2.0/package/scripts/params_linux.py   |   4 +
 .../metrics/JMXPropertyProviderTest.java        |   9 ++
 .../state/alerts/AlertDefinitionHashTest.java   |   4 +-
 .../configs/ha_bootstrap_standby_node.json      |   2 +-
 ...ha_bootstrap_standby_node_initial_start.json |   2 +-
 ...dby_node_initial_start_dfs_nameservices.json |   2 +-
 20 files changed, 224 insertions(+), 83 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-agent/src/main/python/ambari_agent/AlertSchedulerHandler.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/AlertSchedulerHandler.py b/ambari-agent/src/main/python/ambari_agent/AlertSchedulerHandler.py
index 6c1d29c..55c3d6e 100644
--- a/ambari-agent/src/main/python/ambari_agent/AlertSchedulerHandler.py
+++ b/ambari-agent/src/main/python/ambari_agent/AlertSchedulerHandler.py
@@ -283,6 +283,7 @@ class AlertSchedulerHandler():
     for command_json in all_commands:
       clusterName = '' if not 'clusterName' in command_json else command_json['clusterName']
       hostName = '' if not 'hostName' in command_json else command_json['hostName']
+      publicHostName = '' if not 'publicHostName' in command_json else command_json['publicHostName']
       clusterHash = None if not 'hash' in command_json else command_json['hash']
 
       # cache the cluster and cluster hash after loading the JSON
@@ -291,7 +292,7 @@ class AlertSchedulerHandler():
         self._cluster_hashes[clusterName] = clusterHash
 
       for definition in command_json['alertDefinitions']:
-        alert = self.__json_to_callable(clusterName, hostName, definition)
+        alert = self.__json_to_callable(clusterName, hostName, publicHostName, definition)
 
         if alert is None:
           continue
@@ -303,7 +304,7 @@ class AlertSchedulerHandler():
     return definitions
 
 
-  def __json_to_callable(self, clusterName, hostName, json_definition):
+  def __json_to_callable(self, clusterName, hostName, publicHostName, json_definition):
     """
     converts the json that represents all aspects of a definition
     and makes an object that extends BaseAlert that is used for individual
@@ -336,7 +337,7 @@ class AlertSchedulerHandler():
         alert = RecoveryAlert(json_definition, source, self.config, self.recovery_manger)
 
       if alert is not None:
-        alert.set_cluster(clusterName, hostName)
+        alert.set_cluster(clusterName, hostName, publicHostName)
 
     except Exception, exception:
       logger.exception("[AlertScheduler] Unable to load an invalid alert definition. It will be skipped.")
@@ -402,8 +403,9 @@ class AlertSchedulerHandler():
 
         clusterName = '' if not 'clusterName' in execution_command else execution_command['clusterName']
         hostName = '' if not 'hostName' in execution_command else execution_command['hostName']
+        publicHostName = '' if not 'publicHostName' in execution_command else execution_command['publicHostName']
 
-        alert = self.__json_to_callable(clusterName, hostName, alert_definition)
+        alert = self.__json_to_callable(clusterName, hostName, publicHostName, alert_definition)
 
         if alert is None:
           continue

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-agent/src/main/python/ambari_agent/alerts/base_alert.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/alerts/base_alert.py b/ambari-agent/src/main/python/ambari_agent/alerts/base_alert.py
index 7f3b2a5..add29fc 100644
--- a/ambari-agent/src/main/python/ambari_agent/alerts/base_alert.py
+++ b/ambari-agent/src/main/python/ambari_agent/alerts/base_alert.py
@@ -46,6 +46,7 @@ class BaseAlert(object):
     self.alert_source_meta = alert_source_meta
     self.cluster_name = ''
     self.host_name = ''
+    self.public_host_name = ''
     self.config = config
     
   def interval(self):
@@ -86,10 +87,13 @@ class BaseAlert(object):
     self.cluster_configuration = cluster_configuration
 
 
-  def set_cluster(self, cluster_name, host_name):
+  def set_cluster(self, cluster_name, host_name, public_host_name = None):
     """ sets cluster information for the alert """
     self.cluster_name = cluster_name
     self.host_name = host_name
+    self.public_host_name = host_name
+    if public_host_name:
+      self.public_host_name = public_host_name
 
 
   def _get_alert_meta_value_safely(self, meta_key):
@@ -452,7 +456,7 @@ class BaseAlert(object):
       # get the host for dfs.namenode.http-address.c1ha.nn1 and see if it's
       # this host
       value = self._get_configuration_value(key)
-      if value is not None and self.host_name in value:
+      if value is not None and (self.host_name in value or self.public_host_name in value):
         return AlertUri(uri=value, is_ssl_enabled=is_ssl_enabled)
 
     return None

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-agent/src/main/python/ambari_agent/alerts/port_alert.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/alerts/port_alert.py b/ambari-agent/src/main/python/ambari_agent/alerts/port_alert.py
index 1e32718..02cc91c 100644
--- a/ambari-agent/src/main/python/ambari_agent/alerts/port_alert.py
+++ b/ambari-agent/src/main/python/ambari_agent/alerts/port_alert.py
@@ -91,7 +91,9 @@ class PortAlert(BaseAlert):
     # if not parameterized, this will return the static value
     uri_value = self._get_configuration_value(self.uri)
 
+    host_not_specified = False
     if uri_value is None:
+      host_not_specified = True
       uri_value = self.host_name
       logger.debug("[Alert][{0}] Setting the URI to this host since it wasn't specified".format(
         self.get_name()))
@@ -112,6 +114,16 @@ class PortAlert(BaseAlert):
     host = BaseAlert.get_host_from_url(uri_value)
     if host is None or host == "localhost" or host == "0.0.0.0":
       host = self.host_name
+      host_not_specified = True
+
+    hosts = [host]
+    # If host is not specified in the uri, hence we are using current host name
+    # then also add public host name as a fallback.  
+    if host_not_specified and host.lower() == self.host_name.lower() \
+      and self.host_name.lower() != self.public_host_name.lower():
+      hosts.append(self.public_host_name)
+    if logger.isEnabledFor(logging.DEBUG):
+      logger.debug("[Alert][{0}] List of hosts = {1}".format(self.get_name(), hosts))
 
     try:
       port = int(get_port_from_url(uri_value))
@@ -122,51 +134,56 @@ class PortAlert(BaseAlert):
 
       port = self.default_port
 
-
-    if logger.isEnabledFor(logging.DEBUG):
-      logger.debug("[Alert][{0}] Checking {1} on port {2}".format(
-        self.get_name(), host, str(port)))
-
-    s = None
-    try:
-      s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-      s.settimeout(self.critical_timeout)
-
-      if OSCheck.is_windows_family():
-        # on windows 0.0.0.0 is invalid address to connect but on linux it resolved to 127.0.0.1
-        host = resolve_address(host)
-
-      start_time = time.time()
-      s.connect((host, port))
-      if self.socket_command is not None:
-        s.sendall(self.socket_command)
-        data = s.recv(1024)
-        if self.socket_command_response is not None and data != self.socket_command_response:
-          raise Exception("Expected response {0}, Actual response {1}".format(
-            self.socket_command_response, data))
-      end_time = time.time()
-      milliseconds = end_time - start_time
-      seconds = milliseconds / 1000.0
-
-      # not sure why this happens sometimes, but we don't always get a
-      # socket exception if the connect() is > than the critical threshold
-      if seconds >= self.critical_timeout:
-        return (self.RESULT_CRITICAL, ['Socket Timeout', host, port])
-
-      result = self.RESULT_OK
-      if seconds >= self.warning_timeout:
-        result = self.RESULT_WARNING
-
-      return (result, [seconds, port])
-    except Exception as e:
-      return (self.RESULT_CRITICAL, [str(e), host, port])
-    finally:
-      if s is not None:
-        try:
-          s.close()
-        except:
-          # no need to log a close failure
-          pass
+    exceptions = []
+
+    for host in hosts:
+      if logger.isEnabledFor(logging.DEBUG):
+        logger.debug("[Alert][{0}] Checking {1} on port {2}".format(
+          self.get_name(), host, str(port)))
+
+      s = None
+      try:
+        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        s.settimeout(self.critical_timeout)
+
+        if OSCheck.is_windows_family():
+          # on windows 0.0.0.0 is invalid address to connect but on linux it resolved to 127.0.0.1
+          host = resolve_address(host)
+
+        start_time = time.time()
+        s.connect((host, port))
+        if self.socket_command is not None:
+          s.sendall(self.socket_command)
+          data = s.recv(1024)
+          if self.socket_command_response is not None and data != self.socket_command_response:
+            raise Exception("Expected response {0}, Actual response {1}".format(
+              self.socket_command_response, data))
+        end_time = time.time()
+        milliseconds = end_time - start_time
+        seconds = milliseconds / 1000.0
+
+        # not sure why this happens sometimes, but we don't always get a
+        # socket exception if the connect() is > than the critical threshold
+        if seconds >= self.critical_timeout:
+          return (self.RESULT_CRITICAL, ['Socket Timeout', host, port])
+
+        result = self.RESULT_OK
+        if seconds >= self.warning_timeout:
+          result = self.RESULT_WARNING
+
+        return (result, [seconds, port])
+      except Exception as e:
+        exceptions.append(e)
+      finally:
+        if s is not None:
+          try:
+            s.close()
+          except:
+            # no need to log a close failure
+            pass
+
+    if exceptions:
+      return (self.RESULT_CRITICAL, [str(exceptions[0]), hosts[0], port])
 
   def _get_reporting_text(self, state):
     '''

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-agent/src/test/python/ambari_agent/TestAlertSchedulerHandler.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestAlertSchedulerHandler.py b/ambari-agent/src/test/python/ambari_agent/TestAlertSchedulerHandler.py
index d1d27ef..fbcd33f 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestAlertSchedulerHandler.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestAlertSchedulerHandler.py
@@ -70,7 +70,7 @@ class TestAlertSchedulerHandler(TestCase):
       }
     }
 
-    callable_result = scheduler._AlertSchedulerHandler__json_to_callable('cluster', 'host', copy.deepcopy(json_definition))
+    callable_result = scheduler._AlertSchedulerHandler__json_to_callable('cluster', 'host', 'host', copy.deepcopy(json_definition))
 
     self.assertTrue(callable_result is not None)
     self.assertTrue(isinstance(callable_result, MetricAlert))
@@ -85,7 +85,7 @@ class TestAlertSchedulerHandler(TestCase):
       }
     }
 
-    callable_result = scheduler._AlertSchedulerHandler__json_to_callable('cluster', 'host', copy.deepcopy(json_definition))
+    callable_result = scheduler._AlertSchedulerHandler__json_to_callable('cluster', 'host', 'host', copy.deepcopy(json_definition))
 
     self.assertTrue(callable_result is not None)
     self.assertTrue(isinstance(callable_result, AmsAlert))
@@ -100,7 +100,7 @@ class TestAlertSchedulerHandler(TestCase):
     }
 
     scheduler = AlertSchedulerHandler(TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, None, self.config, None)
-    callable_result = scheduler._AlertSchedulerHandler__json_to_callable('cluster', 'host', copy.deepcopy(json_definition))
+    callable_result = scheduler._AlertSchedulerHandler__json_to_callable('cluster', 'host', 'host', copy.deepcopy(json_definition))
 
     self.assertTrue(callable_result is not None)
     self.assertTrue(isinstance(callable_result, PortAlert))
@@ -116,7 +116,7 @@ class TestAlertSchedulerHandler(TestCase):
     }
 
     scheduler = AlertSchedulerHandler(TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, None, self.config, None)
-    callable_result = scheduler._AlertSchedulerHandler__json_to_callable('cluster', 'host', copy.deepcopy(json_definition))
+    callable_result = scheduler._AlertSchedulerHandler__json_to_callable('cluster', 'host', 'host', copy.deepcopy(json_definition))
 
     self.assertTrue(callable_result is not None)
     self.assertTrue(isinstance(callable_result, WebAlert))
@@ -131,7 +131,7 @@ class TestAlertSchedulerHandler(TestCase):
     }
 
     scheduler = AlertSchedulerHandler(TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, None, self.config, None)
-    callable_result = scheduler._AlertSchedulerHandler__json_to_callable('cluster', 'host', copy.deepcopy(json_definition))
+    callable_result = scheduler._AlertSchedulerHandler__json_to_callable('cluster', 'host', 'host', copy.deepcopy(json_definition))
 
     self.assertTrue(callable_result is None)
 
@@ -174,6 +174,7 @@ class TestAlertSchedulerHandler(TestCase):
       {
         'clusterName': 'cluster',
         'hostName': 'host',
+        'publicHostName' : 'host',
         'alertDefinition': {
           'name': 'alert1'
         }
@@ -191,7 +192,7 @@ class TestAlertSchedulerHandler(TestCase):
 
     scheduler.execute_alert(execution_commands)
 
-    scheduler._AlertSchedulerHandler__json_to_callable.assert_called_with('cluster', 'host', {'name': 'alert1'})
+    scheduler._AlertSchedulerHandler__json_to_callable.assert_called_with('cluster', 'host', 'host', {'name': 'alert1'})
     self.assertTrue(alert_mock.collect.called)
 
   def test_execute_alert_from_extension(self):
@@ -199,6 +200,7 @@ class TestAlertSchedulerHandler(TestCase):
       {
         'clusterName': 'cluster',
         'hostName': 'host',
+        'publicHostName' : 'host',
         'alertDefinition': {
           'name': 'alert1'
         }
@@ -216,7 +218,7 @@ class TestAlertSchedulerHandler(TestCase):
 
     scheduler.execute_alert(execution_commands)
 
-    scheduler._AlertSchedulerHandler__json_to_callable.assert_called_with('cluster', 'host', {'name': 'alert1'})
+    scheduler._AlertSchedulerHandler__json_to_callable.assert_called_with('cluster', 'host', 'host', {'name': 'alert1'})
     self.assertTrue(alert_mock.collect.called)
 
   def test_load_definitions(self):
@@ -245,6 +247,7 @@ class TestAlertSchedulerHandler(TestCase):
       {
         'clusterName': 'cluster',
         'hostName': 'host',
+        'publicHostName' : 'host',
         'alertDefinition': {
           'name': 'alert1'
         }

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/main/java/org/apache/ambari/server/agent/AlertDefinitionCommand.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/AlertDefinitionCommand.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/AlertDefinitionCommand.java
index 2929087..be837db 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/AlertDefinitionCommand.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/AlertDefinitionCommand.java
@@ -46,6 +46,9 @@ public class AlertDefinitionCommand extends AgentCommand {
   @SerializedName("hostName")
   private final String m_hostName;
 
+  @SerializedName("publicHostName")
+  private final String m_publicHostName;
+
   @SerializedName("hash")
   private final String m_hash;
 
@@ -61,17 +64,19 @@ public class AlertDefinitionCommand extends AgentCommand {
    * @param clusterName
    *          the name of the cluster this response is for (
    * @param hostName
+   * @param publicHostName
    * @param hash
    * @param definitions
    *
    * @see AlertDefinitionHash
    */
-  public AlertDefinitionCommand(String clusterName, String hostName,
+  public AlertDefinitionCommand(String clusterName, String hostName, String publicHostName,
       String hash, List<AlertDefinition> definitions) {
     super(AgentCommandType.ALERT_DEFINITION_COMMAND);
 
     m_clusterName = clusterName;
     m_hostName = hostName;
+    m_publicHostName = publicHostName;
     m_hash = hash;
     m_definitions = definitions;
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java
index 89ec963..1bc4c36 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java
@@ -557,8 +557,10 @@ public class HeartBeatHandler {
           clusterName, hostname);
 
       String hash = alertDefinitionHash.getHash(clusterName, hostname);
+      Host host = cluster.getHost(hostname);
+      String publicHostName = host == null? hostname : host.getPublicHostName();
       AlertDefinitionCommand command = new AlertDefinitionCommand(clusterName,
-          hostname, hash, definitions);
+          hostname, publicHostName, hash, definitions);
 
       command.addConfigs(configHelper, cluster);
       commands.add(command);

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
index 0e4f3f4..f3211bf 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
@@ -65,6 +65,7 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.DesiredConfig;
+import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.Service;
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
@@ -459,6 +460,12 @@ public abstract class AbstractProviderModule implements ProviderModule,
   }
 
   @Override
+  public String getPublicHostName(String clusterName, String hostName) {
+    Host host = getHost(clusterName, hostName);
+    return host == null? hostName : host.getPublicHostName();
+  }
+
+  @Override
   public Set<String> getHostNames(String clusterName, String componentName) {
     Set<String> hosts = null;
     try {
@@ -472,6 +479,21 @@ public abstract class AbstractProviderModule implements ProviderModule,
   }
 
   @Override
+  public Host getHost(String clusterName, String hostName) {
+    Host host = null;
+    try {
+      Cluster cluster = managementController.getClusters().getCluster(clusterName);
+      if(cluster != null) {
+        host = cluster.getHost(hostName);
+      }
+    } catch (Exception e) {
+      LOG.warn("Exception in getting host info for jmx metrics: ", e);
+    }
+    return host;
+  }
+
+
+  @Override
   public boolean isCollectorComponentLive(String clusterName, MetricsService service) throws SystemException {
 
     final String collectorHostName = getCollectorHostName(clusterName, service);
@@ -528,12 +550,14 @@ public abstract class AbstractProviderModule implements ProviderModule,
               serviceConfigTypes.get(service)
           );
 
+          String publicHostName = getPublicHostName(clusterName, hostName);
           Map<String, String[]> componentPortsProperties = new HashMap<>();
           componentPortsProperties.put(
               componentName,
               getPortProperties(service,
                   componentName,
                   hostName,
+                  publicHostName,
                   configProperties,
                   httpsEnabled
               )
@@ -553,7 +577,7 @@ public abstract class AbstractProviderModule implements ProviderModule,
             }
           }
 
-          initRpcSuffixes(clusterName, componentName, configType, currVersion, hostName);
+          initRpcSuffixes(clusterName, componentName, configType, currVersion, hostName, publicHostName);
         }
       } catch (Exception e) {
         LOG.error("Exception initializing jmx port maps. ", e);
@@ -575,8 +599,8 @@ public abstract class AbstractProviderModule implements ProviderModule,
   }
 
   /**
-   * Computes properties that contains proper port for {@code componentName} on {@code hostName}. Must contain custom logic
-   * for different configurations(like NAMENODE HA).
+   * Computes properties that contains proper port for {@code componentName} on {@code hostName}.
+   * Must contain custom logic for different configurations(like NAMENODE HA).
    * @param service service type
    * @param componentName component name
    * @param hostName host which contains requested component
@@ -584,16 +608,20 @@ public abstract class AbstractProviderModule implements ProviderModule,
    * @param httpsEnabled indicates if https enabled for component
    * @return property name that contain port for {@code componentName} on {@code hostName}
    */
-  String[] getPortProperties(Service.Type service, String componentName, String hostName, Map<String, Object> properties, boolean httpsEnabled) {
+  String[] getPortProperties(Service.Type service, String componentName,
+    String hostName, String publicHostName, Map<String, Object> properties, boolean httpsEnabled) {
     componentName = httpsEnabled ? componentName + "-HTTPS" : componentName;
     if(componentName.startsWith("NAMENODE") && properties.containsKey("dfs.internal.nameservices")) {
       componentName += "-HA";
-      return getNamenodeHaProperty(properties, serviceDesiredProperties.get(service).get(componentName), hostName);
+      return getNamenodeHaProperty(
+        properties, serviceDesiredProperties.get(service).get(componentName), hostName, publicHostName);
     }
     return serviceDesiredProperties.get(service).get(componentName);
   }
 
-  private String[] getNamenodeHaProperty(Map<String, Object> properties, String pattern[], String hostName) {
+  private String[] getNamenodeHaProperty(Map<String, Object> properties, String pattern[],
+    String hostName, String publicHostName) {
+
     // iterate over nameservices and namenodes, to find out namenode http(s) property for concrete host
     for(String nameserviceId : ((String)properties.get("dfs.internal.nameservices")).split(",")) {
       if(properties.containsKey("dfs.ha.namenodes."+nameserviceId)) {
@@ -605,7 +633,8 @@ public abstract class AbstractProviderModule implements ProviderModule,
           );
           if (properties.containsKey(propertyName)) {
             String propertyValue = (String)properties.get(propertyName);
-            if (propertyValue.split(":")[0].equals(hostName)) {
+            String propHostName = propertyValue.split(":")[0];
+            if (propHostName.equals(hostName) || propHostName.equals(publicHostName)) {
               return new String[] {propertyName};
             }
           }
@@ -1181,7 +1210,7 @@ public abstract class AbstractProviderModule implements ProviderModule,
 
   private void initRpcSuffixes(String clusterName, String componentName,
                                String config, String configVersion,
-                               String hostName)
+                               String hostName, String publicHostName)
                               throws Exception {
     if (jmxDesiredRpcSuffixProperties.containsKey(componentName)) {
       Map<String, Map<String, String>> componentToPortsMap;
@@ -1209,7 +1238,7 @@ public abstract class AbstractProviderModule implements ProviderModule,
           keys = jmxDesiredRpcSuffixProperties.get(componentName);
           Map<String, String[]> stringMap = jmxDesiredRpcSuffixProperties.get(componentName);
           for (String tag: stringMap.keySet()) {
-            keys.put(tag, getNamenodeHaProperty(configProperties, stringMap.get(tag), hostName));
+            keys.put(tag, getNamenodeHaProperty(configProperties, stringMap.get(tag), hostName, publicHostName));
           }
         }
       }

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXHostProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXHostProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXHostProvider.java
index cbeea1c..dbf8eb7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXHostProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXHostProvider.java
@@ -17,6 +17,9 @@
  */
 package org.apache.ambari.server.controller.jmx;
 
+import org.apache.ambari.server.controller.spi.SystemException;
+import org.apache.ambari.server.state.Host;
+
 import java.util.Set;
 
 import org.apache.ambari.server.controller.spi.SystemException;
@@ -26,6 +29,8 @@ import org.apache.ambari.server.controller.spi.SystemException;
  */
 public interface JMXHostProvider {
 
+  String getPublicHostName(String clusterName, String hostName);
+
   /**
    * Get the JMX host names for the given cluster name and component name.
    *
@@ -38,6 +43,14 @@ public interface JMXHostProvider {
   Set<String> getHostNames(String clusterName, String componentName);
 
   /**
+   * Get cluster host info given the host name
+   * @param clusterName
+   * @param hostName the host name
+   * @return the host info {@link Host}
+   */
+  Host getHost(String clusterName, String hostName);
+
+  /**
    * Get the port for the specified cluster name and component.
    *
    * @param clusterName    the cluster name

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXPropertyProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXPropertyProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXPropertyProvider.java
index 870d1ef..e4de377 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXPropertyProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXPropertyProvider.java
@@ -40,6 +40,7 @@ import org.apache.ambari.server.controller.spi.Request;
 import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.SystemException;
 import org.apache.ambari.server.controller.utilities.StreamProvider;
+import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.services.MetricsRetrievalService;
 import org.apache.ambari.server.state.services.MetricsRetrievalService.MetricSourceType;
 import org.slf4j.Logger;
@@ -254,6 +255,8 @@ public class JMXPropertyProvider extends ThreadPoolEnabledPropertyProvider {
     for (String hostName : hostNames) {
       try {
         String port = getPort(clusterName, componentName, hostName, httpsEnabled);
+        String publicHostName = jmxHostProvider.getPublicHostName(clusterName, hostName);
+
         if (port == null) {
           LOG.warn("Unable to get JMX metrics.  No port value for " + componentName);
           return resource;
@@ -268,6 +271,17 @@ public class JMXPropertyProvider extends ThreadPoolEnabledPropertyProvider {
         // check to see if there is a cached value and use it if there is
         JMXMetricHolder jmxMetricHolder = metricsRetrievalService.getCachedJMXMetric(jmxUrl);
 
+        if( jmxMetricHolder == null && !hostName.equalsIgnoreCase(publicHostName)) {
+          // build the URL using public host name
+          String publicJmxUrl = getSpec(protocol, publicHostName, port, "/jmx");
+
+          // always submit a request to cache the latest data
+          metricsRetrievalService.submitRequest(MetricSourceType.JMX, streamProvider, publicJmxUrl);
+
+          // check to see if there is a cached value and use it if there is
+          jmxMetricHolder = metricsRetrievalService.getCachedJMXMetric(publicJmxUrl);
+        }
+
         // if the ticket becomes invalid (timeout) then bail out
         if (!ticket.isValid()) {
           return resource;
@@ -290,6 +304,17 @@ public class JMXPropertyProvider extends ThreadPoolEnabledPropertyProvider {
                 metricsRetrievalService.submitRequest(MetricSourceType.JMX, streamProvider, adHocUrl);
                 JMXMetricHolder adHocJMXMetricHolder = metricsRetrievalService.getCachedJMXMetric(adHocUrl);
 
+                if( adHocJMXMetricHolder == null && !hostName.equalsIgnoreCase(publicHostName)) {
+                  // build the adhoc URL using public host name
+                  String publicAdHocUrl = getSpec(protocol, publicHostName, port, queryURL);
+
+                  // always submit a request to cache the latest data
+                  metricsRetrievalService.submitRequest(MetricSourceType.JMX, streamProvider, publicAdHocUrl);
+
+                  // check to see if there is a cached value and use it if there is
+                  adHocJMXMetricHolder = metricsRetrievalService.getCachedJMXMetric(publicAdHocUrl);
+                }
+
                 // if the ticket becomes invalid (timeout) then bail out
                 if (!ticket.isValid()) {
                   return resource;

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
index b4ebcd8..b4f7120 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
@@ -133,6 +133,14 @@ public interface Cluster {
    */
   Set<String> getHosts(String serviceName, String componentName);
 
+  /**
+   * Get specific host info using host name.
+   *
+   * @param hostName the host name
+   * @return Host info {@link Host}
+   */
+  Host getHost(String hostName);
+
 
   /**
    * Adds schs to cluster AND persists them

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionHash.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionHash.java b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionHash.java
index a79b05d..15f7048 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionHash.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionHash.java
@@ -462,7 +462,7 @@ public class AlertDefinitionHash {
         hostNames.add(host.getHostName());
       }
 
-      enqueueAgentCommands(clusterName, hostNames);
+      enqueueAgentCommands(cluster, clusterName, hostNames);
     } catch (AmbariException ae) {
       LOG.error("Unable to lookup cluster for alert definition commands", ae);
     }
@@ -484,15 +484,16 @@ public class AlertDefinitionHash {
    */
   public void enqueueAgentCommands(long clusterId, Collection<String> hosts) {
     String clusterName = null;
+    Cluster cluster = null;
 
     try {
-      Cluster cluster = m_clusters.get().getClusterById(clusterId);
+      cluster = m_clusters.get().getClusterById(clusterId);
       clusterName = cluster.getClusterName();
     } catch (AmbariException ae) {
       LOG.error("Unable to lookup cluster for alert definition commands", ae);
     }
 
-    enqueueAgentCommands(clusterName, hosts);
+    enqueueAgentCommands(cluster, clusterName, hosts);
   }
 
   /**
@@ -509,7 +510,7 @@ public class AlertDefinitionHash {
    * @param hosts
    *          the hosts to push {@link AlertDefinitionCommand}s for.
    */
-  private void enqueueAgentCommands(String clusterName, Collection<String> hosts) {
+  private void enqueueAgentCommands(Cluster cluster, String clusterName, Collection<String> hosts) {
     if (null == clusterName) {
       LOG.warn("Unable to create alert definition agent commands because of a null cluster name");
       return;
@@ -527,11 +528,12 @@ public class AlertDefinitionHash {
 
         String hash = getHash(clusterName, hostName);
 
+        Host host = cluster.getHost(hostName);
+        String publicHostName = host == null? hostName : host.getPublicHostName();
         AlertDefinitionCommand command = new AlertDefinitionCommand(
-            clusterName, hostName, hash, definitions);
+            clusterName, hostName, publicHostName, hash, definitions);
 
         try {
-          Cluster cluster = m_clusters.get().getCluster(clusterName);
           command.addConfigs(m_configHelper.get(), cluster);
         } catch (AmbariException ae) {
           LOG.warn("Unable to add configurations to alert definition command",

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
index a4bf815..06b6217 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
@@ -2124,6 +2124,24 @@ public class ClusterImpl implements Cluster {
   }
 
   @Override
+  public Host getHost(final String hostName) {
+    if (StringUtils.isEmpty(hostName)) {
+      return null;
+    }
+
+    Collection<Host> hosts = getHosts();
+    if(hosts != null) {
+      for (Host host : hosts) {
+        String hostString = host.getHostName();
+        if(hostName.equalsIgnoreCase(hostString)) {
+          return host;
+        }
+      }
+    }
+    return null;
+  }
+
+  @Override
   public Collection<Host> getHosts() {
     Map<String, Host> hosts;
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
index 139fe98..7226d22 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
@@ -115,7 +115,7 @@ def namenode(action=None, hdfs_binary=None, do_format=True, upgrade_type=None,
 
     if params.dfs_ha_enabled and \
       params.dfs_ha_namenode_standby is not None and \
-      params.hostname == params.dfs_ha_namenode_standby:
+      (params.hostname == params.dfs_ha_namenode_standby or params.public_hostname == params.dfs_ha_namenode_standby):
         # if the current host is the standby NameNode in an HA deployment
         # run the bootstrap command, to start the NameNode in standby mode
         # this requires that the active NameNode is already up and running,
@@ -332,7 +332,7 @@ def format_namenode(force=None):
           )
   else:
     if params.dfs_ha_namenode_active is not None and \
-       params.hostname == params.dfs_ha_namenode_active:
+       (params.hostname == params.dfs_ha_namenode_active  or params.public_hostname == params.dfs_ha_namenode_active):
       # check and run the format command in the HA deployment scenario
       # only format the "active" namenode in an HA deployment
       if force:

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_upgrade.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_upgrade.py
index f683dcc..14d6ce2 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_upgrade.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_upgrade.py
@@ -47,7 +47,7 @@ def prepare_upgrade_check_for_previous_dir():
 
   if params.dfs_ha_enabled:
     namenode_ha = NamenodeHAState()
-    if namenode_ha.is_active(params.hostname):
+    if namenode_ha.is_active(params.hostname) or namenode_ha.is_active(params.public_hostname):
       Logger.info("NameNode High Availability is enabled and this is the Active NameNode.")
 
       problematic_previous_namenode_dirs = set()

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
index 82fd950..a9fc179 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
@@ -170,6 +170,7 @@ klist_path_local = get_klist_path(default('/configurations/kerberos-env/executab
 kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
 #hosts
 hostname = config["hostname"]
+public_hostname = config["public_hostname"]
 rm_host = default("/clusterHostInfo/rm_host", [])
 slave_hosts = default("/clusterHostInfo/slave_hosts", [])
 oozie_servers = default("/clusterHostInfo/oozie_server", [])
@@ -307,6 +308,9 @@ if dfs_ha_enabled:
     if hostname.lower() in nn_host.lower():
       namenode_id = nn_id
       namenode_rpc = nn_host
+    elif public_hostname.lower() in nn_host.lower():
+      namenode_id = nn_id
+      namenode_rpc = nn_host
   # With HA enabled namenode_address is recomputed
   namenode_address = format('hdfs://{dfs_ha_nameservices}')
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/JMXPropertyProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/JMXPropertyProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/JMXPropertyProviderTest.java
index 7e0c66d..156ee66 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/JMXPropertyProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/JMXPropertyProviderTest.java
@@ -53,6 +53,7 @@ import org.apache.ambari.server.security.authorization.AuthorizationException;
 import org.apache.ambari.server.security.authorization.AuthorizationHelperInitializer;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.services.MetricsRetrievalService;
 import org.apache.ambari.server.utils.SynchronousThreadPoolExecutor;
 import org.junit.After;
@@ -604,11 +605,19 @@ public class JMXPropertyProviderTest {
       this.unknownPort = unknownPort;
     }
 
+    @Override public String getPublicHostName(final String clusterName, final String hostName) {
+      return null;
+    }
+
     @Override
     public Set<String> getHostNames(String clusterName, String componentName) {
       return null;
     }
 
+    @Override public Host getHost(final String clusterName, final String hostName) {
+      return null;
+    }
+
     @Override
     public String getPort(String clusterName, String componentName, String hostName, boolean httpsEnabled) throws SystemException {
       return getPort(clusterName, componentName, hostName);

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertDefinitionHashTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertDefinitionHashTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertDefinitionHashTest.java
index e6e288e..4895d82 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertDefinitionHashTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertDefinitionHashTest.java
@@ -379,10 +379,10 @@ public class AlertDefinitionHashTest extends TestCase {
     ActionQueue actionQueue = m_injector.getInstance(ActionQueue.class);
 
     AlertDefinitionCommand definitionCommand1 = new AlertDefinitionCommand(
-        CLUSTERNAME, HOSTNAME, "12345", null);
+        CLUSTERNAME, HOSTNAME, HOSTNAME, "12345", null);
 
     AlertDefinitionCommand definitionCommand2 = new AlertDefinitionCommand(
-        CLUSTERNAME, "anotherHost", "67890", null);
+        CLUSTERNAME, "anotherHost", "anotherHost", "67890", null);
 
     AlertExecutionCommand executionCommand = new AlertExecutionCommand(
         CLUSTERNAME, HOSTNAME, null);

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json
index 96f4d9d..df09021 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json
@@ -36,7 +36,7 @@
         "script_type": "PYTHON"
     }, 
     "taskId": 93, 
-    "public_hostname": "c6401.ambari.apache.org", 
+    "public_hostname": "c6402.ambari.apache.org",
     "configurations": {
         "mapred-site": {
             "mapreduce.jobhistory.address": "c6402.ambari.apache.org:10020", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start.json
index de2742f..a0a8f36 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start.json
@@ -37,7 +37,7 @@
         "phase": "INITIAL_START"
     }, 
     "taskId": 93, 
-    "public_hostname": "c6401.ambari.apache.org", 
+    "public_hostname": "c6402.ambari.apache.org",
     "configurations": {
         "mapred-site": {
             "mapreduce.jobhistory.address": "c6402.ambari.apache.org:10020", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start_dfs_nameservices.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start_dfs_nameservices.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start_dfs_nameservices.json
index ba0fa8f..a3176bd 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start_dfs_nameservices.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start_dfs_nameservices.json
@@ -37,7 +37,7 @@
         "phase": "INITIAL_START"
     }, 
     "taskId": 93, 
-    "public_hostname": "c6401.ambari.apache.org", 
+    "public_hostname": "c6402.ambari.apache.org",
     "configurations": {
         "mapred-site": {
             "mapreduce.jobhistory.address": "c6402.ambari.apache.org:10020", 


[23/33] ambari git commit: AMBARI-21360: Ability to delete a view instance from view instance list (sangeetar)

Posted by rl...@apache.org.
AMBARI-21360: Ability to delete a view instance from view instance list (sangeetar)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f4fb1742
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f4fb1742
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f4fb1742

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: f4fb1742b1ba29247da897f4ca1dd67a82a34c1b
Parents: 5c874cc
Author: Sangeeta Ravindran <sa...@apache.org>
Authored: Wed Jun 28 10:49:31 2017 -0700
Committer: Sangeeta Ravindran <sa...@apache.org>
Committed: Wed Jun 28 10:49:31 2017 -0700

----------------------------------------------------------------------
 AMBARI-21360.patch | 45 ---------------------------------------------
 1 file changed, 45 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f4fb1742/AMBARI-21360.patch
----------------------------------------------------------------------
diff --git a/AMBARI-21360.patch b/AMBARI-21360.patch
deleted file mode 100644
index c26f3a0..0000000
--- a/AMBARI-21360.patch
+++ /dev/null
@@ -1,45 +0,0 @@
-diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsListCtrl.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsListCtrl.js
-index c41e5d4..4e7bae3 100644
---- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsListCtrl.js
-+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsListCtrl.js
-@@ -132,6 +132,26 @@ angular.module('ambariAdminConsole')
-     }
-   };
- 
-+  $scope.deleteInstance = function(instance) {
-+      ConfirmationModal.show(
-+        $t('common.delete', {
-+          term: $t('views.viewInstance')
-+        }),
-+        $t('common.deleteConfirmation', {
-+          instanceType: $t('views.viewInstance'),
-+          instanceName: instance.ViewInstanceInfo.label
-+        })
-+      ).then(function() {
-+        View.deleteInstance(instance.ViewInstanceInfo.view_name, instance.ViewInstanceInfo.version, instance.ViewInstanceInfo.instance_name)
-+          .then(function() {
-+            loadViews();
-+          })
-+          .catch(function(data) {
-+            Alert.error($t('views.alerts.cannotDeleteInstance'), data.data.message);
-+          });
-+      });
-+    };
-+
-   $scope.reloadViews = function () {
-     loadViews();
-   };
-diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/listTable.html b/ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/listTable.html
-index 59c322f..91b9a93 100644
---- a/ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/listTable.html
-+++ b/ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/listTable.html
-@@ -81,6 +81,9 @@
-                     <td class="col-sm-1">
-                         <a class="instance-link ng-scope ng-binding" href="#/views/{{view.view_name}}/versions/{{instance.ViewInstanceInfo.version}}/instances/{{instance.ViewInstanceInfo.instance_name}}/clone"><i class="fa fa-copy"></i></a>
-                     </td>
-+                    <td class="col-sm-1">
-+                        <a class="instance-link ng-scope ng-binding" href ng-click="deleteInstance(instance)"><i class="fa fa-trash-o"></i></a>
-+                    </td>
-                 </tr>
-                 </tbody>
-                 <tfoot>


[17/33] ambari git commit: Revert "BUG-82124 : As part of START_ALL Ranger kms starts after hbase and hive causing their start failure (Vishal Suvagia via mugdha)"

Posted by rl...@apache.org.
Revert "BUG-82124 : As part of START_ALL Ranger kms starts after hbase and hive causing their start failure (Vishal Suvagia via mugdha)"

This reverts commit 39efba35980642b832f79c6afb332716045d859f.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4c1ea4c4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4c1ea4c4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4c1ea4c4

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 4c1ea4c46c71bcae5d054eb7465283bca85cc9e8
Parents: 39efba3
Author: Mugdha Varadkar <mu...@apache.org>
Authored: Wed Jun 28 13:58:10 2017 +0530
Committer: Mugdha Varadkar <mu...@apache.org>
Committed: Wed Jun 28 13:58:10 2017 +0530

----------------------------------------------------------------------
 .../common-services/HBASE/0.96.0.2.0/role_command_order.json      | 3 ++-
 .../common-services/HBASE/2.0.0.3.0/role_command_order.json       | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/4c1ea4c4/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/role_command_order.json b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/role_command_order.json
index 58d0c1c..110b179 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/role_command_order.json
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/role_command_order.json
@@ -4,6 +4,7 @@
     "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
     "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
     "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"],
-    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START", "ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START", "RANGER_KMS_SERVER-START"]
+    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START", "ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START"]
+
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c1ea4c4/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json
index 69f4bf6..44d0c61 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json
@@ -4,7 +4,7 @@
     "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
     "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
     "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"],
-    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START", "ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START", "RANGER_KMS_SERVER-START"],
+    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START", "ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START"],
     "PHOENIX_QUERY_SERVER-START": ["HBASE_MASTER-START"]
   }
 }


[08/33] ambari git commit: AMBARI-21277. Fail to create solr clients in Log Search / Log Feeder (oleewere)

Posted by rl...@apache.org.
AMBARI-21277. Fail to create solr clients in Log Search / Log Feeder (oleewere)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/535660bb
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/535660bb
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/535660bb

Branch: refs/heads/branch-feature-AMBARI-20859
Commit: 535660bb75efefd21692b0558dc7d74e420903f2
Parents: 1e29590
Author: oleewere <ol...@gmail.com>
Authored: Mon Jun 26 15:44:34 2017 +0200
Committer: oleewere <ol...@gmail.com>
Committed: Tue Jun 27 13:21:40 2017 +0200

----------------------------------------------------------------------
 ambari-metrics/ambari-metrics-common/pom.xml | 4 ++++
 1 file changed, 4 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/535660bb/ambari-metrics/ambari-metrics-common/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/pom.xml b/ambari-metrics/ambari-metrics-common/pom.xml
index f0d3963..cae9734 100644
--- a/ambari-metrics/ambari-metrics-common/pom.xml
+++ b/ambari-metrics/ambari-metrics-common/pom.xml
@@ -108,6 +108,10 @@
                   <pattern>org.jboss</pattern>
                   <shadedPattern>org.apache.hadoop.metrics2.sink.relocated.jboss</shadedPattern>
                 </relocation>
+                <relocation>
+                  <pattern>org.apache.http</pattern>
+                  <shadedPattern>org.apache.hadoop.metrics2.sink.relocated.apache.http</shadedPattern>
+                </relocation>
               </relocations>
             </configuration>
           </execution>