You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by sr...@apache.org on 2015/06/18 04:11:42 UTC

ambari git commit: AMBARI-11986. Stack advisor not picking correct reference host consistently (srimanth)

Repository: ambari
Updated Branches:
  refs/heads/branch-2.1 77d4946e1 -> 98f6ac084


AMBARI-11986. Stack advisor not picking correct reference host consistently (srimanth)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/98f6ac08
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/98f6ac08
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/98f6ac08

Branch: refs/heads/branch-2.1
Commit: 98f6ac0844f700e270ce1a3f7a5b8c7274b3717c
Parents: 77d4946
Author: Srimanth Gunturi <sg...@hortonworks.com>
Authored: Wed Jun 17 17:30:59 2015 -0700
Committer: Srimanth Gunturi <sg...@hortonworks.com>
Committed: Wed Jun 17 18:47:24 2015 -0700

----------------------------------------------------------------------
 .../stacks/HDP/2.0.6/services/stack_advisor.py  | 15 +++-
 .../stacks/HDP/2.2/services/stack_advisor.py    |  4 +-
 .../stacks/2.0.6/common/test_stack_advisor.py   | 77 +++++++++++++++++++-
 3 files changed, 87 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/98f6ac08/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
index b48c70a..d9da407 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
@@ -124,8 +124,8 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
     putYarnProperty = self.putProperty(configurations, "yarn-site", services)
     putYarnEnvProperty = self.putProperty(configurations, "yarn-env", services)
     nodemanagerMinRam = 1048576 # 1TB in mb
-    for nodemanager in self.getHostsWithComponent("YARN", "NODEMANAGER", services, hosts):
-      nodemanagerMinRam = min(nodemanager["Hosts"]["total_mem"]/1024, nodemanagerMinRam)
+    if "referenceNodeManagerHost" in clusterData:
+      nodemanagerMinRam = min(clusterData["referenceNodeManagerHost"]["total_mem"]/1024, nodemanagerMinRam)
     putYarnProperty('yarn.nodemanager.resource.memory-mb', int(round(min(clusterData['containers'] * clusterData['ramPerContainer'], nodemanagerMinRam))))
     putYarnProperty('yarn.scheduler.minimum-allocation-mb', int(clusterData['ramPerContainer']))
     putYarnProperty('yarn.scheduler.maximum-allocation-mb', int(configurations["yarn-site"]["properties"]["yarn.nodemanager.resource.memory-mb"]))
@@ -251,11 +251,18 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
     }
 
     if len(hosts["items"]) > 0:
-      nodeManagerHost = self.getHostWithComponent("YARN", "NODEMANAGER", services, hosts)
-      if nodeManagerHost is not None:
+      nodeManagerHosts = self.getHostsWithComponent("YARN", "NODEMANAGER", services, hosts)
+      # NodeManager host with least memory is generally used in calculations as it will work in larger hosts.
+      if nodeManagerHosts is not None and len(nodeManagerHosts) > 0:
+        nodeManagerHost = nodeManagerHosts[0]
+        for nmHost in nodeManagerHosts:
+          if nmHost["Hosts"]["total_mem"] < nodeManagerHost["Hosts"]["total_mem"]:
+            nodeManagerHost = nmHost
         host = nodeManagerHost["Hosts"]
+        cluster["referenceNodeManagerHost"] = host
       else:
         host = hosts["items"][0]["Hosts"]
+      cluster["referenceHost"] = host
       cluster["cpu"] = host["cpu_count"]
       cluster["disk"] = len(host["disk_info"])
       cluster["ram"] = int(host["total_mem"] / (1024 * 1024))

http://git-wip-us.apache.org/repos/asf/ambari/blob/98f6ac08/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
index a9bedeb..bdbe9b1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
@@ -630,8 +630,8 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
     self.recommendYARNConfigurations(configurations, clusterData, services, hosts)
     putMapredProperty = self.putProperty(configurations, "mapred-site", services)
     nodemanagerMinRam = 1048576 # 1TB in mb
-    for nodemanager in self.getHostsWithComponent("YARN", "NODEMANAGER", services, hosts):
-      nodemanagerMinRam = min(nodemanager["Hosts"]["total_mem"]/1024, nodemanagerMinRam)
+    if "referenceNodeManagerHost" in clusterData:
+      nodemanagerMinRam = min(clusterData["referenceNodeManagerHost"]["total_mem"]/1024, nodemanagerMinRam)
     putMapredProperty('yarn.app.mapreduce.am.resource.mb', configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"])
     putMapredProperty('yarn.app.mapreduce.am.command-opts', "-Xmx" + str(int(0.8 * int(configurations["mapred-site"]["properties"]["yarn.app.mapreduce.am.resource.mb"]))) + "m" + " -Dhdp.version=${hdp.version}")
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]

http://git-wip-us.apache.org/repos/asf/ambari/blob/98f6ac08/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
index 1636849..00622a7 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
@@ -344,13 +344,83 @@ class TestHDP206StackAdvisor(TestCase):
       "ramPerContainer": 512,
       "mapMemory": 512,
       "reduceMemory": 512,
-      "amMemory": 512
+      "amMemory": 512,
+      "referenceHost": hosts["items"][0]["Hosts"]
     }
 
+    # Test - Cluster data with 1 host
     result = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, None)
+    self.assertEquals(result, expected)
 
+    # Test - Cluster data with 2 hosts - pick minimum memory
+    servicesList.append("YARN")
+    services = {"services":
+                  [{"StackServices":
+                      {"service_name" : "YARN",
+                       "service_version" : "2.6.0.2.2"
+                      },
+                    "components":[
+                      {
+                        "StackServiceComponents":{
+                          "advertise_version":"true",
+                          "cardinality":"1+",
+                          "component_category":"SLAVE",
+                          "component_name":"NODEMANAGER",
+                          "custom_commands":[
+
+                          ],
+                          "display_name":"NodeManager",
+                          "is_client":"false",
+                          "is_master":"false",
+                          "service_name":"YARN",
+                          "stack_name":"HDP",
+                          "stack_version":"2.2",
+                          "hostnames":[
+                            "host1",
+                            "host2"
+                          ]
+                        },
+                        "dependencies":[
+                        ]
+                      }
+                      ],
+                    }],
+                "configurations": {}
+    }
+    hosts["items"][0]["Hosts"]["host_name"] = "host1"
+    hosts["items"].append({
+        "Hosts": {
+            "cpu_count" : 4,
+            "total_mem" : 500000,
+            "host_name" : "host2",
+            "disk_info" : [
+              {"mountpoint" : "/"},
+              {"mountpoint" : "/dev/shm"},
+              {"mountpoint" : "/vagrant"},
+              {"mountpoint" : "/"},
+              {"mountpoint" : "/dev/shm"},
+              {"mountpoint" : "/"},
+              {"mountpoint" : "/dev/shm"},
+              {"mountpoint" : "/vagrant"}
+            ]
+          }
+        })
+    expected["referenceHost"] = hosts["items"][1]["Hosts"]
+    expected["referenceNodeManagerHost"] = hosts["items"][1]["Hosts"]
+    expected["amMemory"] = 256
+    expected["containers"] = 8
+    expected["cpu"] = 4
+    expected["totalAvailableRam"] = 2048
+    expected["mapMemory"] = 256
+    expected["minContainerSize"] = 256
+    expected["reduceMemory"] = 256
+    expected["ram"] = 0
+    expected["ramPerContainer"] = 256
+    expected["reservedRam"] = 1
+    result = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, services)
     self.assertEquals(result, expected)
 
+
   def test_getConfigurationClusterSummary_withHBaseAnd48gbRam(self):
     servicesList = ["HBASE"]
     components = []
@@ -386,7 +456,8 @@ class TestHDP206StackAdvisor(TestCase):
       "ramPerContainer": 3072,
       "mapMemory": 3072,
       "reduceMemory": 3072,
-      "amMemory": 3072
+      "amMemory": 3072,
+      "referenceHost": hosts["items"][0]["Hosts"]
     }
 
     result = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, None)
@@ -816,4 +887,4 @@ class TestHDP206StackAdvisor(TestCase):
     }
 
     self.stackAdvisor.mergeValidators(parentValidators, childValidators)
-    self.assertEquals(expected, parentValidators)
\ No newline at end of file
+    self.assertEquals(expected, parentValidators)