You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by dm...@apache.org on 2016/12/01 15:39:40 UTC
[1/2] ambari git commit: AMBARI-18713. use exclude list of mount
device types on docker containers (dgrinenko via dlysnichenko)
Repository: ambari
Updated Branches:
refs/heads/branch-2.5 3a3421388 -> ab6d55234
refs/heads/trunk 9b21f30b5 -> bb8be5ba6
AMBARI-18713. use exclude list of mount device types on docker containers (dgrinenko via dlysnichenko)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/bb8be5ba
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/bb8be5ba
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/bb8be5ba
Branch: refs/heads/trunk
Commit: bb8be5ba6a950b64382fb1a44f04c1dbf24db382
Parents: 9b21f30
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Thu Dec 1 17:36:15 2016 +0200
Committer: Lisnichenko Dmitro <dl...@hortonworks.com>
Committed: Thu Dec 1 17:36:15 2016 +0200
----------------------------------------------------------------------
.../src/main/resources/scripts/stack_advisor.py | 11 +-
.../HDP/2.0.6/configuration/cluster-env.xml | 10 +
.../stacks/HDP/2.0.6/services/stack_advisor.py | 47 +-
.../stacks/HDP/2.1/services/stack_advisor.py | 20 +-
.../stacks/HDP/2.2/services/stack_advisor.py | 7 +
.../src/main/resources/stacks/stack_advisor.py | 209 ++++++++-
.../stacks/2.0.6/common/test_stack_advisor.py | 16 +-
.../stacks/2.1/common/test_stack_advisor.py | 2 +
.../stacks/2.2/common/test_stack_advisor.py | 17 +-
.../test/python/stacks/test_stack_adviser.py | 239 ++++++++++
ambari-web/app/mixins.js | 1 -
.../app/utils/configs/config_initializer.js | 28 +-
.../mount_points_based_initializer_mixin.js | 340 --------------
ambari-web/test/utils/ajax/ajax_test.js | 9 +-
.../utils/configs/config_initializer_test.js | 457 -------------------
15 files changed, 562 insertions(+), 851 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/bb8be5ba/ambari-server/src/main/resources/scripts/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/scripts/stack_advisor.py b/ambari-server/src/main/resources/scripts/stack_advisor.py
index 5926c39..abfab87 100755
--- a/ambari-server/src/main/resources/scripts/stack_advisor.py
+++ b/ambari-server/src/main/resources/scripts/stack_advisor.py
@@ -70,13 +70,11 @@ def main(argv=None):
if len(args) < 3:
sys.stderr.write(USAGE)
sys.exit(2)
- pass
action = args[0]
if action not in ALL_ACTIONS:
sys.stderr.write(USAGE)
sys.exit(2)
- pass
hostsFile = args[1]
servicesFile = args[2]
@@ -89,6 +87,7 @@ def main(argv=None):
stackName = services["Versions"]["stack_name"]
stackVersion = services["Versions"]["stack_version"]
parentVersions = []
+
if "stack_hierarchy" in services["Versions"]:
parentVersions = services["Versions"]["stack_hierarchy"]["stack_versions"]
@@ -96,8 +95,9 @@ def main(argv=None):
# Perform action
actionDir = os.path.realpath(os.path.dirname(args[1]))
- result = {}
- result_file = "non_valid_result_file.json"
+
+ # filter
+ hosts = stackAdvisor.filterHostMounts(hosts, services)
if action == RECOMMEND_COMPONENT_LAYOUT_ACTION:
result = stackAdvisor.recommendComponentLayout(services, hosts)
@@ -111,12 +111,11 @@ def main(argv=None):
elif action == RECOMMEND_CONFIGURATION_DEPENDENCIES:
result = stackAdvisor.recommendConfigurationDependencies(services, hosts)
result_file = os.path.join(actionDir, "configurations.json")
- else: # action == VALIDATE_CONFIGURATIONS
+ else: # action == VALIDATE_CONFIGURATIONS
result = stackAdvisor.validateConfigurations(services, hosts)
result_file = os.path.join(actionDir, "configurations-validation.json")
dumpJson(result, result_file)
- pass
def instantiateStackAdvisor(stackName, stackVersion, parentVersions):
http://git-wip-us.apache.org/repos/asf/ambari/blob/bb8be5ba/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
index cc6c8a3..93680bf 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
@@ -280,4 +280,14 @@ gpgcheck=0</value>
<description>YARN Memory widget should be hidden by default on the dashboard.</description>
<on-ambari-upgrade add="true"/>
</property>
+ <property>
+ <name>agent_mounts_ignore_list</name>
+ <value/>
+ <description>Comma separated list of the mounts which would be ignored by Ambari during property values suggestion by Stack Advisor</description>
+ <on-ambari-upgrade add="false"/>
+ <value-attributes>
+ <visible>true</visible>
+ <empty-value-valid>true</empty-value-valid>
+ </value-attributes>
+ </property>
</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/bb8be5ba/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
index 83014b7..cda8d36 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
@@ -101,9 +101,23 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
"HBASE": self.recommendHbaseConfigurations,
"STORM": self.recommendStormConfigurations,
"AMBARI_METRICS": self.recommendAmsConfigurations,
- "RANGER": self.recommendRangerConfigurations
+ "RANGER": self.recommendRangerConfigurations,
+ "ZOOKEEPER": self.recommendZookeeperConfigurations,
+ "OOZIE": self.recommendOozieConfigurations
}
+ def recommendOozieConfigurations(self, configurations, clusterData, services, hosts):
+ oozie_mount_properties = [
+ ("oozie_data_dir", "OOZIE_SERVER", "/hadoop/oozie/data", "single"),
+ ]
+ self.updateMountProperties("oozie-env", oozie_mount_properties, configurations, services, hosts)
+
+ def recommendZookeeperConfigurations(self, configurations, clusterData, services, hosts):
+ zk_mount_properties = [
+ ("dataDir", "ZOOKEEPER_SERVER", "/hadoop/zookeeper", "single"),
+ ]
+ self.updateMountProperties("zoo.cfg", zk_mount_properties, configurations, services, hosts)
+
def recommendYARNConfigurations(self, configurations, clusterData, services, hosts):
putYarnProperty = self.putProperty(configurations, "yarn-site", services)
putYarnPropertyAttribute = self.putPropertyAttribute(configurations, "yarn-site")
@@ -116,6 +130,15 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
putYarnProperty('yarn.scheduler.maximum-allocation-mb', int(configurations["yarn-site"]["properties"]["yarn.nodemanager.resource.memory-mb"]))
putYarnEnvProperty('min_user_id', self.get_system_min_uid())
+ yarn_mount_properties = [
+ ("yarn.nodemanager.local-dirs", "NODEMANAGER", "/hadoop/yarn/local", "multi"),
+ ("yarn.nodemanager.log-dirs", "NODEMANAGER", "/hadoop/yarn/log", "multi"),
+ ("yarn.timeline-service.leveldb-timeline-store.path", "APP_TIMELINE_SERVER", "/hadoop/yarn/timeline", "single"),
+ ("yarn.timeline-service.leveldb-state-store.path", "APP_TIMELINE_SERVER", "/hadoop/yarn/timeline", "single")
+ ]
+
+ self.updateMountProperties("yarn-site", yarn_mount_properties, configurations, services, hosts)
+
sc_queue_name = self.recommendYarnQueue(services, "yarn-env", "service_check.queue.name")
if sc_queue_name is not None:
putYarnEnvProperty("service_check.queue.name", sc_queue_name)
@@ -146,6 +169,13 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
putMapredProperty('mapreduce.map.java.opts', "-Xmx" + str(int(round(0.8 * clusterData['mapMemory']))) + "m")
putMapredProperty('mapreduce.reduce.java.opts', "-Xmx" + str(int(round(0.8 * clusterData['reduceMemory']))) + "m")
putMapredProperty('mapreduce.task.io.sort.mb', min(int(round(0.4 * clusterData['mapMemory'])), 1024))
+
+ mapred_mounts = [
+ ("mapred.local.dir", ["TASKTRACKER", "NODEMANAGER"], "/hadoop/mapred", "multi")
+ ]
+
+ self.updateMountProperties("mapred-site", mapred_mounts, configurations, services, hosts)
+
mr_queue = self.recommendYarnQueue(services, "mapred-site", "mapreduce.job.queuename")
if mr_queue is not None:
putMapredProperty("mapreduce.job.queuename", mr_queue)
@@ -393,12 +423,15 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
if len(namenodes.split(',')) > 1:
putHDFSSitePropertyAttributes("dfs.namenode.rpc-address", "delete", "true")
- #Initialize default 'dfs.datanode.data.dir' if needed
- if (not hdfsSiteProperties) or ('dfs.datanode.data.dir' not in hdfsSiteProperties):
- dataDirs = '/hadoop/hdfs/data'
- putHDFSSiteProperty('dfs.datanode.data.dir', dataDirs)
- else:
- dataDirs = hdfsSiteProperties['dfs.datanode.data.dir'].split(",")
+ hdfs_mount_properties = [
+ ("dfs.datanode.data.dir", "DATANODE", "/hadoop/hdfs/data", "multi"),
+ ("dfs.namenode.name.dir", "DATANODE", "/hadoop/hdfs/namenode", "multi"),
+ ("dfs.namenode.checkpoint.dir", "SECONDARY_NAMENODE", "/hadoop/hdfs/namesecondary", "single")
+ ]
+
+ self.updateMountProperties("hdfs-site", hdfs_mount_properties, configurations, services, hosts)
+
+ dataDirs = hdfsSiteProperties['dfs.datanode.data.dir'].split(",")
# dfs.datanode.du.reserved should be set to 10-15% of volume size
# For each host selects maximum size of the volume. Then gets minimum for all hosts.
http://git-wip-us.apache.org/repos/asf/ambari/blob/bb8be5ba/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
index 9678dc1..17225d0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
@@ -24,12 +24,30 @@ class HDP21StackAdvisor(HDP206StackAdvisor):
childRecommendConfDict = {
"OOZIE": self.recommendOozieConfigurations,
"HIVE": self.recommendHiveConfigurations,
- "TEZ": self.recommendTezConfigurations
+ "TEZ": self.recommendTezConfigurations,
+ "STORM": self.recommendStormConfigurations,
+ "FALCON": self.recommendFalconConfigurations
}
parentRecommendConfDict.update(childRecommendConfDict)
return parentRecommendConfDict
+ def recommendStormConfigurations(self, configurations, clusterData, services, hosts):
+ storm_mounts = [
+ ("storm.local.dir", ["NODEMANAGER", "NIMBUS"], "/hadoop/storm", "single")
+ ]
+
+ self.updateMountProperties("storm-site", storm_mounts, configurations, services, hosts)
+
+ def recommendFalconConfigurations(self, configurations, clusterData, services, hosts):
+ falcon_mounts = [
+ ("*.falcon.graph.storage.directory", "FALCON_SERVER", "/hadoop/falcon/data/lineage/graphdb", "single")
+ ]
+
+ self.updateMountProperties("falcon-startup.properties", falcon_mounts, configurations, services, hosts)
+
def recommendOozieConfigurations(self, configurations, clusterData, services, hosts):
+ super(HDP21StackAdvisor, self).recommendOozieConfigurations(configurations, clusterData, services, hosts)
+
oozieSiteProperties = getSiteProperties(services['configurations'], 'oozie-site')
oozieEnvProperties = getSiteProperties(services['configurations'], 'oozie-env')
putOozieProperty = self.putProperty(configurations, "oozie-site", services)
http://git-wip-us.apache.org/repos/asf/ambari/blob/bb8be5ba/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
index a8a75e5..feafc04 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
@@ -44,10 +44,17 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
"RANGER": self.recommendRangerConfigurations,
"LOGSEARCH" : self.recommendLogsearchConfigurations,
"SPARK": self.recommendSparkConfigurations,
+ "KAFKA": self.recommendKafkaConfigurations,
}
parentRecommendConfDict.update(childRecommendConfDict)
return parentRecommendConfDict
+ def recommendKafkaConfigurations(self, configurations, clusterData, services, hosts):
+ kafka_mounts = [
+ ("log.dirs", "KAFKA_BROKER", "/kafka-logs", "multi")
+ ]
+
+ self.updateMountProperties("kafka-broker", kafka_mounts, configurations, services, hosts)
def recommendSparkConfigurations(self, configurations, clusterData, services, hosts):
"""
http://git-wip-us.apache.org/repos/asf/ambari/blob/bb8be5ba/ambari-server/src/main/resources/stacks/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/stack_advisor.py b/ambari-server/src/main/resources/stacks/stack_advisor.py
index f6191f8..8148379 100644
--- a/ambari-server/src/main/resources/stacks/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/stack_advisor.py
@@ -972,6 +972,18 @@ class DefaultStackAdvisor(StackAdvisor):
return None
return siteConfig.get("properties")
+ def getServicesSiteProperties(self, services, siteName):
+ if not services:
+ return None
+
+ configurations = services.get("configurations")
+ if not configurations:
+ return None
+ siteConfig = configurations.get(siteName)
+ if siteConfig is None:
+ return None
+ return siteConfig.get("properties")
+
def putProperty(self, config, configType, services=None):
userConfigs = {}
changedConfigs = []
@@ -1040,14 +1052,27 @@ class DefaultStackAdvisor(StackAdvisor):
config[configType]["property_attributes"][key][attribute] = attributeValue if isinstance(attributeValue, list) else str(attributeValue)
return appendPropertyAttribute
-
- """
- Returns the hosts which are running the given component.
- """
def getHosts(self, componentsList, componentName):
+ """
+ Returns the hosts which are running the given component.
+ """
hostNamesList = [component["hostnames"] for component in componentsList if component["component_name"] == componentName]
return hostNamesList[0] if len(hostNamesList) > 0 else []
+ def getMountPoints(self, hosts):
+ """
+ Return list of mounts available on the hosts
+
+ :type hosts dict
+ """
+ mount_points = []
+
+ for item in hosts["items"]:
+ if "disk_info" in item["Hosts"]:
+ mount_points.append(item["Hosts"]["disk_info"])
+
+ return mount_points
+
def isSecurityEnabled(self, services):
"""
Determines if security is enabled by testing the value of cluster-env/security enabled.
@@ -1084,3 +1109,179 @@ class DefaultStackAdvisor(StackAdvisor):
def getServiceNames(self, services):
return [service["StackServices"]["service_name"] for service in services["services"]]
+
+ def filterHostMounts(self, hosts, services):
+ """
+ Filter mounts on the host using agent_mounts_ignore_list, by excluding any record with a mount-point
+ mentioned in agent_mounts_ignore_list.
+
+ This function updates hosts dictionary
+
+ Example:
+
+ agent_mounts_ignore_list : "/run/secrets"
+
+ Hosts record :
+
+ "disk_info" : [
+ {
+ ...
+ "mountpoint" : "/"
+ },
+ {
+ ...
+ "mountpoint" : "/run/secrets"
+ }
+ ]
+
+ Result would be :
+
+ "disk_info" : [
+ {
+ ...
+ "mountpoint" : "/"
+ }
+ ]
+
+ :type hosts dict
+ :type services dict
+ """
+ if not services or "items" not in hosts:
+ return hosts
+
+ banned_filesystems = ["devtmpfs", "tmpfs", "vboxsf", "cdfs"]
+ banned_mount_points = ["/etc/resolv.conf", "/etc/hostname", "/boot", "/mnt", "/tmp", "/run/secrets"]
+
+ cluster_env = self.getServicesSiteProperties(services, "cluster-env")
+ ignore_list = []
+
+ if cluster_env and "agent_mounts_ignore_list" in cluster_env and cluster_env["agent_mounts_ignore_list"].strip():
+ ignore_list = [x.strip() for x in cluster_env["agent_mounts_ignore_list"].strip().split(",")]
+
+ ignore_list.extend(banned_mount_points)
+
+ for host in hosts["items"]:
+ if "Hosts" not in host and "disk_info" not in host["Hosts"]:
+ continue
+
+ host = host["Hosts"]
+ disk_info = []
+
+ for disk in host["disk_info"]:
+ if disk["mountpoint"] not in ignore_list\
+ and disk["type"].lower() not in banned_filesystems:
+ disk_info.append(disk)
+
+ host["disk_info"] = disk_info
+
+ return hosts
+
+ def __getSameHostMounts(self, hosts):
+ """
+ Return list of the mounts which are same and present on all hosts
+
+ :type hosts dict
+ :rtype list
+ """
+ if not hosts:
+ return None
+
+ hostMounts = self.getMountPoints(hosts)
+ mounts = []
+ for m in hostMounts:
+ host_mounts = set([item["mountpoint"] for item in m])
+ mounts = host_mounts if not mounts else mounts & host_mounts
+
+ return sorted(mounts)
+
+ def getMountPathVariations(self, initial_value, component_name, services, hosts):
+ """
+ Recommends best fitted mount by prefixing path with it.
+
+ :return list of paths with properly selected mounts. If no recommendation is possible,
+ an empty list is returned
+
+ :type initial_value str
+ :type component_name str
+ :type services dict
+ :type hosts dict
+ :rtype list
+ """
+ available_mounts = []
+
+ if not initial_value:
+ return available_mounts
+
+ mounts = self.__getSameHostMounts(hosts)
+ sep = "/"
+
+ if not mounts:
+ return available_mounts
+
+ for mount in mounts:
+ new_mount = initial_value if mount == "/" else os.path.join(mount + sep, initial_value.lstrip(sep))
+ if new_mount not in available_mounts:
+ available_mounts.append(new_mount)
+
+ # no list transformations after filling the list, because this will cause item order change
+ return available_mounts
+
+ def getMountPathVariation(self, initial_value, component_name, services, hosts):
+ """
+ Recommends best fitted mount by prefixing path with it.
+
+ :return list of paths with properly selected mounts. If no recommendation is possible,
+ an empty list is returned
+
+ :type initial_value str
+ :type component_name str
+ :type services dict
+ :type hosts dict
+ :rtype list
+ """
+ try:
+ return [self.getMountPathVariations(initial_value, component_name, services, hosts)[0]]
+ except IndexError:
+ return []
+
+ def updateMountProperties(self, siteConfig, propertyDefinitions, configurations, services, hosts):
+ """
+ Update properties according to recommendations for available mount-points
+
+ propertyDefinitions is an array of tuples : property name, component name, initial value, recommendation type
+
+ Where,
+
+ property name - name of the property
+ component name - name of the component to which this property belongs
+ initial value - initial path
+ recommendation type - could be "multi" or "single". This describes recommendation strategy, to use only one disk
+ or use all available space on the host
+
+ :type propertyDefinitions list
+ :type siteConfig str
+ :type configurations dict
+ :type services dict
+ :type hosts dict
+ """
+
+ props = self.getServicesSiteProperties(services, siteConfig)
+ put_f = self.putProperty(configurations, siteConfig, services)
+
+ for prop_item in propertyDefinitions:
+ name, component, default_value, rc_type = prop_item
+ recommendation = None
+
+ if props is None or name not in props:
+ if rc_type == "multi":
+ recommendation = self.getMountPathVariations(default_value, component, services, hosts)
+ else:
+ recommendation = self.getMountPathVariation(default_value, component, services, hosts)
+ elif props and name in props and props[name] == default_value:
+ if rc_type == "multi":
+ recommendation = self.getMountPathVariations(default_value, component, services, hosts)
+ else:
+ recommendation = self.getMountPathVariation(default_value, component, services, hosts)
+
+ if recommendation:
+ put_f(name, ",".join(recommendation))
http://git-wip-us.apache.org/repos/asf/ambari/blob/bb8be5ba/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
index 9595b9e..4b3397f 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
@@ -1195,8 +1195,10 @@ class TestHDP206StackAdvisor(TestCase):
{'properties':
{'falcon_user': 'falcon'}},
'hdfs-site':
- {'properties':
+ {'properties':
{'dfs.datanode.data.dir': '/hadoop/hdfs/data',
+ 'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
+ 'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary',
'dfs.datanode.du.reserved': '10240000000'}},
'hive-env':
{'properties':
@@ -1330,6 +1332,8 @@ class TestHDP206StackAdvisor(TestCase):
'hdfs-site':
{'properties':
{'dfs.datanode.data.dir': '/hadoop/hdfs/data',
+ 'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
+ 'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary',
'dfs.datanode.du.reserved': '10240000000'}},
'hive-env':
{'properties':
@@ -1463,8 +1467,10 @@ class TestHDP206StackAdvisor(TestCase):
{'hive_user': 'hive',
'webhcat_user': 'webhcat'}},
'hdfs-site':
- {'properties':
+ {'properties':
{'dfs.datanode.data.dir': '/hadoop/hdfs/data',
+ 'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
+ 'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary',
'dfs.datanode.du.reserved': '10240000000'}},
'hadoop-env':
{'properties':
@@ -1484,10 +1490,12 @@ class TestHDP206StackAdvisor(TestCase):
expected["hdfs-site"] = {
'properties': {
- 'dfs.datanode.data.dir': '/hadoop/hdfs/data',
'dfs.datanode.du.reserved': '10240000000',
'dfs.internal.nameservices': 'mycluster',
- 'dfs.ha.namenodes.mycluster': 'nn1,nn2'
+ 'dfs.ha.namenodes.mycluster': 'nn1,nn2',
+ 'dfs.datanode.data.dir': '/hadoop/hdfs/data',
+ 'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
+ 'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary',
},
'property_attributes': {
'dfs.namenode.rpc-address': {
http://git-wip-us.apache.org/repos/asf/ambari/blob/bb8be5ba/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
index 7835262..f9fb1f5 100644
--- a/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
@@ -487,6 +487,8 @@ class TestHDP21StackAdvisor(TestCase):
"hdfs-site": {
"properties": {
'dfs.datanode.data.dir': '/hadoop/hdfs/data',
+ 'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
+ 'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary',
'dfs.datanode.du.reserved': '10240000000'
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/bb8be5ba/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
index 3cd05d3..c8da075 100644
--- a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
@@ -3068,7 +3068,9 @@ class TestHDP22StackAdvisor(TestCase):
'dfs.namenode.safemode.threshold-pct': '1.000',
'dfs.datanode.failed.volumes.tolerated': '1',
'dfs.namenode.handler.count': '25',
- 'dfs.datanode.data.dir': '/path/1,/path/2,/path/3,/path/4'
+ 'dfs.datanode.data.dir': '/path/1,/path/2,/path/3,/path/4',
+ 'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
+ 'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary'
},
'property_attributes': {
'dfs.datanode.failed.volumes.tolerated': {'maximum': '4'},
@@ -3743,7 +3745,11 @@ class TestHDP22StackAdvisor(TestCase):
"yarn.nodemanager.linux-container-executor.cgroups.hierarchy": "/yarn",
"yarn.scheduler.maximum-allocation-mb": "39424",
"yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler",
- "hadoop.registry.rm.enabled": "false"
+ "hadoop.registry.rm.enabled": "false",
+ "yarn.timeline-service.leveldb-state-store.path": "/hadoop/yarn/timeline",
+ "yarn.timeline-service.leveldb-timeline-store.path": "/hadoop/yarn/timeline",
+ "yarn.nodemanager.local-dirs": "/hadoop/yarn/local,/dev/shm/hadoop/yarn/local,/vagrant/hadoop/yarn/local",
+ "yarn.nodemanager.log-dirs": "/hadoop/yarn/log,/dev/shm/hadoop/yarn/log,/vagrant/hadoop/yarn/log"
},
"property_attributes": {
"yarn.scheduler.minimum-allocation-vcores": {
@@ -3789,7 +3795,6 @@ class TestHDP22StackAdvisor(TestCase):
"yarn.nodemanager.linux-container-executor.group": "hadoop",
"yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor",
"yarn.nodemanager.linux-container-executor.cgroups.mount-path": "/cgroup",
- "yarn.nodemanager.linux-container-executor.group": "hadoop",
"yarn.nodemanager.linux-container-executor.cgroups.mount": "true",
"yarn.nodemanager.resource.memory-mb": "39424",
"yarn.scheduler.minimum-allocation-mb": "3584",
@@ -3799,7 +3804,11 @@ class TestHDP22StackAdvisor(TestCase):
"yarn.nodemanager.linux-container-executor.cgroups.hierarchy": "/yarn",
"yarn.scheduler.maximum-allocation-mb": "39424",
"yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler",
- "hadoop.registry.rm.enabled": "false"
+ "hadoop.registry.rm.enabled": "false",
+ "yarn.timeline-service.leveldb-state-store.path": "/hadoop/yarn/timeline",
+ "yarn.timeline-service.leveldb-timeline-store.path": "/hadoop/yarn/timeline",
+ "yarn.nodemanager.local-dirs": "/hadoop/yarn/local,/dev/shm/hadoop/yarn/local,/vagrant/hadoop/yarn/local",
+ "yarn.nodemanager.log-dirs": "/hadoop/yarn/log,/dev/shm/hadoop/yarn/log,/vagrant/hadoop/yarn/log"
},
"property_attributes": {
"yarn.nodemanager.linux-container-executor.cgroups.mount": {
http://git-wip-us.apache.org/repos/asf/ambari/blob/bb8be5ba/ambari-server/src/test/python/stacks/test_stack_adviser.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/test_stack_adviser.py b/ambari-server/src/test/python/stacks/test_stack_adviser.py
new file mode 100644
index 0000000..8146a0c
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/test_stack_adviser.py
@@ -0,0 +1,239 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import os
+from unittest import TestCase
+
+
+class TestBasicAdvisor(TestCase):
+ def setUp(self):
+ import imp
+ self.maxDiff = None
+ self.testDirectory = os.path.dirname(os.path.abspath(__file__))
+ stackAdvisorPath = os.path.abspath(os.path.join(self.testDirectory, '../../../main/resources/stacks/stack_advisor.py'))
+
+ default_sa_classname = 'DefaultStackAdvisor'
+
+ with open(stackAdvisorPath, 'rb') as fp:
+ stack_advisor_impl = imp.load_module('stack_advisor', fp, stackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
+
+ clazz = getattr(stack_advisor_impl, default_sa_classname)
+ self.stackAdvisor = clazz()
+
+ def test_filterHostMounts(self):
+
+ filtered_mount = "/data"
+
+ hosts = {
+ "items": [
+ {
+ "Hosts": {
+ "cpu_count": 4,
+ "total_mem": 50331648,
+ "disk_info": [
+ {"mountpoint": "/", "type": "ext3"},
+ {"mountpoint": "/dev/shm", "type": "ext3"},
+ {"mountpoint": "/vagrant", "type": "vboxsf"},
+ {"mountpoint": "/dev/shm", "type": "ext3"},
+ {"mountpoint": "/vagrant", "type": "ext3"},
+ {"mountpoint": filtered_mount, "type": "ext3"},
+ ],
+ "public_host_name": "c6401.ambari.apache.org",
+ "host_name": "c6401.ambari.apache.org"
+ },
+ },
+ {
+ "Hosts": {
+ "cpu_count": 4,
+ "total_mem": 50331648,
+ "disk_info": [
+ {"mountpoint": "/", "type": "ext3"},
+ {"mountpoint": "/dev/shm1", "type": "ext3"},
+ {"mountpoint": "/vagrant1", "type": "ext3"},
+ {"mountpoint": filtered_mount, "type": "ext3"}
+ ],
+ "public_host_name": "c6402.ambari.apache.org",
+ "host_name": "c6402.ambari.apache.org"
+ },
+ }
+ ]
+ }
+
+ services = {
+ "Versions": {
+ "parent_stack_version": "2.5",
+ "stack_name": "HDP",
+ "stack_version": "2.6",
+ "stack_hierarchy": {
+ "stack_name": "HDP",
+ "stack_versions": ["2.5", "2.4", "2.3", "2.2", "2.1", "2.0.6"]
+ }
+ },
+ "services": [
+ ],
+ "configurations": {
+ "cluster-env": {
+ "properties": {
+ "agent_mounts_ignore_list": filtered_mount
+ }
+ }
+ }
+ }
+
+ filtered_hosts = self.stackAdvisor.filterHostMounts(hosts, services)
+
+ for host in filtered_hosts["items"]:
+ self.assertEquals(False, filtered_mount in host["Hosts"]["disk_info"])
+
+ def test_getMountPathVariations(self):
+
+ filtered_mount = "/data"
+
+ hosts = {
+ "items": [
+ {
+ "Hosts": {
+ "cpu_count": 4,
+ "total_mem": 50331648,
+ "disk_info": [
+ {"mountpoint": "/", "type": "ext3"},
+ {"mountpoint": "/dev/shm", "type": "ext3"},
+ {"mountpoint": "/vagrant", "type": "vboxsf"},
+ {"mountpoint": "/dev/shm", "type": "ext3"},
+ {"mountpoint": "/vagrant", "type": "ext3"},
+ {"mountpoint": filtered_mount, "type": "ext3"},
+ ],
+ "public_host_name": "c6401.ambari.apache.org",
+ "host_name": "c6401.ambari.apache.org"
+ },
+ },
+ {
+ "Hosts": {
+ "cpu_count": 4,
+ "total_mem": 50331648,
+ "disk_info": [
+ {"mountpoint": "/", "type": "ext3"},
+ {"mountpoint": "/dev/shm1", "type": "ext3"},
+ {"mountpoint": "/vagrant1", "type": "ext3"},
+ {"mountpoint": filtered_mount, "type": "ext3"}
+ ],
+ "public_host_name": "c6402.ambari.apache.org",
+ "host_name": "c6402.ambari.apache.org"
+ },
+ }
+ ]
+ }
+
+ services = {
+ "Versions": {
+ "parent_stack_version": "2.5",
+ "stack_name": "HDP",
+ "stack_version": "2.6",
+ "stack_hierarchy": {
+ "stack_name": "HDP",
+ "stack_versions": ["2.5", "2.4", "2.3", "2.2", "2.1", "2.0.6"]
+ }
+ },
+ "services": [
+ ],
+ "configurations": {
+ "cluster-env": {
+ "properties": {
+ "agent_mounts_ignore_list": filtered_mount
+ }
+ }
+ }
+ }
+
+ hosts = self.stackAdvisor.filterHostMounts(hosts, services)
+ avail_mounts = self.stackAdvisor.getMountPathVariations("/test/folder", "DATANODE", services, hosts)
+
+ self.assertEquals(True, avail_mounts is not None)
+ self.assertEquals(1, len(avail_mounts))
+ self.assertEquals("/test/folder", avail_mounts[0])
+
+ def test_updateMountProperties(self):
+ hosts = {
+ "items": [
+ {
+ "Hosts": {
+ "cpu_count": 4,
+ "total_mem": 50331648,
+ "disk_info": [
+ {"mountpoint": "/", "type": "ext3"},
+ {"mountpoint": "/dev/shm", "type": "ext3"},
+ {"mountpoint": "/vagrant", "type": "vboxsf"},
+ {"mountpoint": "/dev/shm", "type": "ext3"},
+ {"mountpoint": "/vagrant", "type": "ext3"},
+ {"mountpoint": "/data", "type": "ext3"},
+ ],
+ "public_host_name": "c6401.ambari.apache.org",
+ "host_name": "c6401.ambari.apache.org"
+ },
+ },
+ {
+ "Hosts": {
+ "cpu_count": 4,
+ "total_mem": 50331648,
+ "disk_info": [
+ {"mountpoint": "/", "type": "ext3"},
+ {"mountpoint": "/dev/shm1", "type": "ext3"},
+ {"mountpoint": "/vagrant", "type": "vboxsf"},
+ {"mountpoint": "/data", "type": "ext3"}
+ ],
+ "public_host_name": "c6402.ambari.apache.org",
+ "host_name": "c6402.ambari.apache.org"
+ },
+ }
+ ]
+ }
+
+ services = {
+ "Versions": {
+ "parent_stack_version": "2.5",
+ "stack_name": "HDP",
+ "stack_version": "2.6",
+ "stack_hierarchy": {
+ "stack_name": "HDP",
+ "stack_versions": ["2.5", "2.4", "2.3", "2.2", "2.1", "2.0.6"]
+ }
+ },
+ "services": [
+ ],
+ "configurations": {
+ "cluster-env": {
+ "properties": {
+ "agent_mounts_ignore_list": ""
+ }
+ },
+ "some-site": {
+ "path_prop": "/test"
+ }
+ }
+ }
+
+ pathProperties = [
+ ("path_prop", "DATANODE", "/test", "multi"),
+ ]
+
+ configurations = {}
+ hosts = self.stackAdvisor.filterHostMounts(hosts, services)
+
+ self.stackAdvisor.updateMountProperties("some-site", pathProperties, configurations, services, hosts)
+
+ self.assertEquals("/test,/data/test", configurations["some-site"]["properties"]["path_prop"])
http://git-wip-us.apache.org/repos/asf/ambari/blob/bb8be5ba/ambari-web/app/mixins.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins.js b/ambari-web/app/mixins.js
index 594ed74..7b4d6b4 100644
--- a/ambari-web/app/mixins.js
+++ b/ambari-web/app/mixins.js
@@ -69,4 +69,3 @@ require('mixins/common/widgets/widget_mixin');
require('mixins/common/widgets/widget_section');
require('mixins/unit_convert/base_unit_convert_mixin');
require('mixins/unit_convert/convert_unit_widget_view_mixin');
-require('utils/configs/mount_points_based_initializer_mixin');
http://git-wip-us.apache.org/repos/asf/ambari/blob/bb8be5ba/ambari-web/app/utils/configs/config_initializer.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/configs/config_initializer.js b/ambari-web/app/utils/configs/config_initializer.js
index cb5b41f..de9ca7e 100644
--- a/ambari-web/app/utils/configs/config_initializer.js
+++ b/ambari-web/app/utils/configs/config_initializer.js
@@ -20,7 +20,6 @@ var App = require('app');
var stringUtils = require('utils/string_utils');
require('utils/configs/config_initializer_class');
-require('utils/configs/mount_points_based_initializer_mixin');
require('utils/configs/hosts_based_initializer_mixin');
/**
@@ -53,7 +52,7 @@ function getZKBasedConfig() {
*
* @instance ConfigInitializer
*/
-App.ConfigInitializer = App.ConfigInitializerClass.create(App.MountPointsBasedInitializerMixin, App.HostsBasedInitializerMixin, {
+App.ConfigInitializer = App.ConfigInitializerClass.create(App.HostsBasedInitializerMixin, {
initializers: function() {
return {
@@ -111,26 +110,7 @@ App.ConfigInitializer = App.ConfigInitializerClass.create(App.MountPointsBasedIn
'templeton.zookeeper.hosts': getZKBasedConfig(),
'hadoop.registry.zk.quorum': getZKBasedConfig(),
'hive.cluster.delegation.token.store.zookeeper.connectString': getZKBasedConfig(),
- 'instance.zookeeper.host': getZKBasedConfig(),
-
- 'dfs.name.dir': this.getMultipleMountPointsConfig('NAMENODE', 'file'),
- 'dfs.namenode.name.dir': this.getMultipleMountPointsConfig('NAMENODE', 'file'),
- 'dfs.data.dir': this.getMultipleMountPointsConfig('DATANODE', 'file'),
- 'dfs.datanode.data.dir': this.getMultipleMountPointsConfig('DATANODE', 'file'),
- 'yarn.nodemanager.local-dirs': this.getMultipleMountPointsConfig('NODEMANAGER'),
- 'yarn.nodemanager.log-dirs': this.getMultipleMountPointsConfig('NODEMANAGER'),
- 'mapred.local.dir': this.getMultipleMountPointsConfig(['TASKTRACKER', 'NODEMANAGER']),
- 'log.dirs': this.getMultipleMountPointsConfig('KAFKA_BROKER'),
-
- 'fs.checkpoint.dir': this.getSingleMountPointConfig('SECONDARY_NAMENODE', 'file'),
- 'dfs.namenode.checkpoint.dir': this.getSingleMountPointConfig('SECONDARY_NAMENODE', 'file'),
- 'yarn.timeline-service.leveldb-timeline-store.path': this.getSingleMountPointConfig('APP_TIMELINE_SERVER'),
- 'yarn.timeline-service.leveldb-state-store.path': this.getSingleMountPointConfig('APP_TIMELINE_SERVER'),
- 'dataDir': this.getSingleMountPointConfig('ZOOKEEPER_SERVER'),
- 'oozie_data_dir': this.getSingleMountPointConfig('OOZIE_SERVER'),
- 'storm.local.dir': this.getSingleMountPointConfig(['NODEMANAGER', 'NIMBUS']),
- '*.falcon.graph.storage.directory': this.getSingleMountPointConfig('FALCON_SERVER'),
- '*.falcon.graph.serialize.path': this.getSingleMountPointConfig('FALCON_SERVER')
+ 'instance.zookeeper.host': getZKBasedConfig()
}
}.property(''),
@@ -146,9 +126,7 @@ App.ConfigInitializer = App.ConfigInitializerClass.create(App.MountPointsBasedIn
},
initializerTypes: [
- {name: 'zookeeper_based', method: '_initAsZookeeperServersList'},
- {name: 'single_mountpoint', method: '_initAsSingleMountPoint'},
- {name: 'multiple_mountpoints', method: '_initAsMultipleMountPoints'}
+ {name: 'zookeeper_based', method: '_initAsZookeeperServersList'}
],
/**
http://git-wip-us.apache.org/repos/asf/ambari/blob/bb8be5ba/ambari-web/app/utils/configs/mount_points_based_initializer_mixin.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/configs/mount_points_based_initializer_mixin.js b/ambari-web/app/utils/configs/mount_points_based_initializer_mixin.js
deleted file mode 100644
index 59a3985..0000000
--- a/ambari-web/app/utils/configs/mount_points_based_initializer_mixin.js
+++ /dev/null
@@ -1,340 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-/**
- * Regexp used to determine if mount point is windows-like
- *
- * @type {RegExp}
- */
-var winRegex = /^([a-z]):\\?$/;
-
-App.MountPointsBasedInitializerMixin = Em.Mixin.create({
-
- /**
- * Map for methods used as value-modifiers for configProperties with values as mount point(s)
- * Used if mount point is win-like (@see winRegex)
- * Key: id
- * Value: method-name
- *
- * @type {{default: string, file: string, slashes: string}}
- */
- winReplacersMap: {
- default: '_defaultWinReplace',
- file: '_winReplaceWithFile',
- slashes: '_defaultWinReplaceWithAdditionalSlashes'
- },
-
- /**
- * Initializer for configs with value as one of the possible mount points
- * Only hosts that contains on the components from <code>initializer.components</code> are processed
- * Hosts with Windows needs additional processing (@see winReplacersMap)
- * Value example: '/', '/some/cool/dir'
- *
- * @param {configProperty} configProperty
- * @param {topologyLocalDB} localDB
- * @param {object} dependencies
- * @param {object} initializer
- * @return {Object}
- */
- _initAsSingleMountPoint: function (configProperty, localDB, dependencies, initializer) {
- var hostsInfo = this._updateHostInfo(localDB.hosts);
- var setOfHostNames = this._getSetOfHostNames(localDB, initializer);
- var winReplacersMap = this.get('winReplacersMap');
- // In Add Host Wizard, if we did not select this slave component for any host, then we don't process any further.
- if (!setOfHostNames.length) {
- return configProperty;
- }
- var allMountPoints = this._getAllMountPoints(setOfHostNames, hostsInfo, localDB);
-
- var mPoint = allMountPoints[0].mountpoint;
- if (mPoint === "/") {
- mPoint = Em.get(configProperty, 'recommendedValue');
- }
- else {
- var mp = mPoint.toLowerCase();
- if (winRegex.test(mp)) {
- var methodName = winReplacersMap[initializer.winReplacer];
- mPoint = this[methodName].call(this, configProperty, mp);
- }
- else {
- mPoint = mPoint + Em.get(configProperty, 'recommendedValue');
- }
- }
- Em.setProperties(configProperty, {
- value: mPoint,
- recommendedValue: mPoint
- });
-
- return configProperty;
- },
-
- /**
- * Initializer for configs with value as all of the possible mount points
- * Only hosts that contains on the components from <code>initializer.components</code> are processed
- * Hosts with Windows needs additional processing (@see winReplacersMap)
- * Value example: '/\n/some/cool/dir' (`\n` - is divider)
- *
- * @param {Object} configProperty
- * @param {topologyLocalDB} localDB
- * @param {object} dependencies
- * @param {object} initializer
- * @return {Object}
- */
- _initAsMultipleMountPoints: function (configProperty, localDB, dependencies, initializer) {
- var hostsInfo = this._updateHostInfo(localDB.hosts);
- var self = this;
- var setOfHostNames = this._getSetOfHostNames(localDB, initializer);
- var winReplacersMap = this.get('winReplacersMap');
- // In Add Host Wizard, if we did not select this slave component for any host, then we don't process any further.
- if (!setOfHostNames.length) {
- return configProperty;
- }
-
- var allMountPoints = this._getAllMountPoints(setOfHostNames, hostsInfo, localDB);
- var mPoint = '';
-
- allMountPoints.forEach(function (eachDrive) {
- if (eachDrive.mountpoint === '/') {
- mPoint += Em.get(configProperty, 'recommendedValue') + "\n";
- }
- else {
- var mp = eachDrive.mountpoint.toLowerCase();
- if (winRegex.test(mp)) {
- var methodName = winReplacersMap[initializer.winReplacer];
- mPoint += self[methodName].call(this, configProperty, mp);
- }
- else {
- mPoint += eachDrive.mountpoint + Em.get(configProperty, 'recommendedValue') + "\n";
- }
- }
- }, this);
-
- Em.setProperties(configProperty, {
- value: mPoint,
- recommendedValue: mPoint
- });
-
- return configProperty;
- },
-
- /**
- * Replace drive-based windows-path with 'file:///'
- *
- * @param {configProperty} configProperty
- * @param {string} mountPoint
- * @returns {string}
- * @private
- */
- _winReplaceWithFile: function (configProperty, mountPoint) {
- var winDriveUrl = mountPoint.toLowerCase().replace(winRegex, 'file:///$1:');
- return winDriveUrl + Em.get(configProperty, 'recommendedValue') + '\n';
- },
-
- /**
- * Replace drive-based windows-path
- *
- * @param {configProperty} configProperty
- * @param {string} mountPoint
- * @returns {string}
- * @private
- */
- _defaultWinReplace: function (configProperty, mountPoint) {
- var winDrive = mountPoint.toLowerCase().replace(winRegex, '$1:');
- var winDir = Em.get(configProperty, 'recommendedValue').replace(/\//g, '\\');
- return winDrive + winDir + '\n';
- },
-
- /**
- * Same to <code>_defaultWinReplace</code>, but with extra-slash in the end
- *
- * @param {configProperty} configProperty
- * @param {string} mountPoint
- * @returns {string}
- * @private
- */
- _defaultWinReplaceWithAdditionalSlashes: function (configProperty, mountPoint) {
- var winDrive = mountPoint.toLowerCase().replace(winRegex, '$1:');
- var winDir = Em.get(configProperty, 'recommendedValue').replace(/\//g, '\\\\');
- return winDrive + winDir + '\n';
- },
-
- /**
- * Update information from localDB using <code>App.Host</code>-model
- *
- * @param {object} hostsInfo
- * @returns {object}
- * @private
- */
- _updateHostInfo: function (hostsInfo) {
- App.Host.find().forEach(function (item) {
- if (!hostsInfo[item.get('id')]) {
- hostsInfo[item.get('id')] = {
- name: item.get('id'),
- cpu: item.get('cpu'),
- memory: item.get('memory'),
- disk_info: item.get('diskInfo'),
- bootStatus: "REGISTERED",
- isInstalled: true
- };
- }
- });
- return hostsInfo;
- },
-
- /**
- * Determines if mount point is valid
- * Criterias:
- * <ul>
- * <li>Should has available space</li>
- * <li>Should not be home-dir</li>
- * <li>Should not be docker-dir</li>
- * <li>Should not be boot-dir</li>
- * <li>Should not be dev-dir</li>
- * <li>Valid mount point started from /usr/hdp/ should be /usr/hdp/current
- * or /usr/hdp/<STACK_VERSION_NUMBER> e.g. /usr/hdp/2.5.0.0
- * </li>
- * </ul>
- *
- * @param {{mountpoint: string, available: number}} mPoint
- * @returns {function} true - valid, false - invalid
- * @private
- */
- _filterMountPoint: function (localDB) {
- var stackVersionNumber = [Em.getWithDefault(localDB.selectedStack || {}, 'repository_version', null)].compact();
- return function(mPoint) {
- var isAvailable = mPoint.available !== 0;
- if (!isAvailable) {
- return false;
- }
-
- var stackRoot = '/usr/hdp';
- var notHome = !['/', '/home'].contains(mPoint.mountpoint);
- var notDocker = !['/etc/resolv.conf', '/etc/hostname', '/etc/hosts'].contains(mPoint.mountpoint);
- var notBoot = mPoint.mountpoint && !(mPoint.mountpoint.startsWith('/boot')
- || mPoint.mountpoint.startsWith('/mnt')
- || mPoint.mountpoint.startsWith('/tmp'));
- var notDev = !(['devtmpfs', 'tmpfs', 'vboxsf', 'CDFS'].contains(mPoint.type));
- var validStackRootMount = !(mPoint.mountpoint.startsWith(stackRoot) && !['current'].concat(stackVersionNumber).filter(function(i) {
- return mPoint.mountpoint === stackRoot + '/' + i;
- }).length);
-
- return notHome && notDocker && notBoot && notDev && validStackRootMount;
- };
- },
-
- /**
- * Get list of hostNames from localDB which contains needed components
- *
- * @param {topologyLocalDB} localDB
- * @param {object} initializer
- * @returns {string[]}
- * @private
- */
- _getSetOfHostNames: function (localDB, initializer) {
- var masterComponentHostsInDB = Em.getWithDefault(localDB, 'masterComponentHosts', []);
- var slaveComponentHostsInDB = Em.getWithDefault(localDB, 'slaveComponentHosts', []);
- var hosts = masterComponentHostsInDB.filter(function (master) {
- return initializer.components.contains(master.component);
- }).mapProperty('hostName');
-
- var sHosts = slaveComponentHostsInDB.find(function (slave) {
- return initializer.components.contains(slave.componentName);
- });
- if (sHosts) {
- hosts = hosts.concat(sHosts.hosts.mapProperty('hostName'));
- }
- return hosts;
- },
-
- /**
- * Get list of all unique valid mount points for hosts
- *
- * @param {string[]} setOfHostNames
- * @param {object} hostsInfo
- * @param {topologyLocalDB} localDB
- * @returns {string[]}
- * @private
- */
- _getAllMountPoints: function (setOfHostNames, hostsInfo, localDB) {
- var allMountPoints = [],
- mountPointFilter = this._filterMountPoint(localDB);
- for (var i = 0; i < setOfHostNames.length; i++) {
- var hostname = setOfHostNames[i];
- var mountPointsPerHost = hostsInfo[hostname].disk_info;
- var mountPointAsRoot = mountPointsPerHost.findProperty('mountpoint', '/');
-
- // If Server does not send any host details information then atleast one mountpoint should be presumed as root
- // This happens in a single container Linux Docker environment.
- if (!mountPointAsRoot) {
- mountPointAsRoot = {
- mountpoint: '/'
- };
- }
-
- mountPointsPerHost.filter(mountPointFilter).forEach(function (mPoint) {
- if( !allMountPoints.findProperty("mountpoint", mPoint.mountpoint)) {
- allMountPoints.push(mPoint);
- }
- }, this);
- }
-
- if (!allMountPoints.length) {
- allMountPoints.push(mountPointAsRoot);
- }
- return allMountPoints;
- },
-
- /**
- * Settings for <code>single_mountpoint</code>-initializer
- * Used for configs with value as one of the possible mount points
- *
- * @see _initAsSingleMountPoint
- * @param {string|string[]} components
- * @param {string} winReplacer
- * @returns {{components: string[], winReplacer: string, type: string}}
- */
- getSingleMountPointConfig: function (components, winReplacer) {
- winReplacer = winReplacer || 'default';
- return {
- components: Em.makeArray(components),
- winReplacer: winReplacer,
- type: 'single_mountpoint'
- };
- },
-
- /**
- * Settings for <code>multiple_mountpoints</code>-initializer
- * Used for configs with value as all of the possible mount points
- *
- * @see _initAsMultipleMountPoints
- * @param {string|string[]} components
- * @param {string} winReplacer
- * @returns {{components: string[], winReplacer: string, type: string}}
- */
- getMultipleMountPointsConfig: function (components, winReplacer) {
- winReplacer = winReplacer || 'default';
- return {
- components: Em.makeArray(components),
- winReplacer: winReplacer,
- type: 'multiple_mountpoints'
- };
- }
-
-});
http://git-wip-us.apache.org/repos/asf/ambari/blob/bb8be5ba/ambari-web/test/utils/ajax/ajax_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/utils/ajax/ajax_test.js b/ambari-web/test/utils/ajax/ajax_test.js
index d252c3d..3556e1e 100644
--- a/ambari-web/test/utils/ajax/ajax_test.js
+++ b/ambari-web/test/utils/ajax/ajax_test.js
@@ -29,11 +29,16 @@ describe('App.ajax', function() {
beforeEach(function() {
App.ajax.send.restore();
+ sinon.stub(App.logger, 'setTimer');
sinon.spy(App.ajax, 'send'); // no sense to test stubbed function, so going to spy on it
App.set('apiPrefix', '/api/v1');
App.set('clusterName', 'tdk');
});
+ afterEach(function() {
+ App.logger.setTimer.restore();
+ });
+
describe('#send', function() {
it('Without sender', function() {
expect(App.ajax.send({})).to.equal(null);
@@ -168,7 +173,7 @@ describe('App.ajax', function() {
});
});
});
-
+
describe('#abortRequests', function () {
var xhr = {
@@ -198,6 +203,6 @@ describe('App.ajax', function() {
it('should clear requests array', function () {
expect(requests).to.have.length(0);
});
-
+
});
});
http://git-wip-us.apache.org/repos/asf/ambari/blob/bb8be5ba/ambari-web/test/utils/configs/config_initializer_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/utils/configs/config_initializer_test.js b/ambari-web/test/utils/configs/config_initializer_test.js
index 19ba03d..544ab1d 100644
--- a/ambari-web/test/utils/configs/config_initializer_test.js
+++ b/ambari-web/test/utils/configs/config_initializer_test.js
@@ -905,312 +905,6 @@ describe('App.ConfigInitializer', function () {
});
- describe('config with mount points', function () {
-
- var localDB = {
- masterComponentHosts: [
- {
- component: 'NAMENODE',
- hostName: 'h0'
- },
- {
- component: 'SECONDARY_NAMENODE',
- hostName: 'h4'
- },
- {
- component: 'APP_TIMELINE_SERVER',
- hostName: 'h0'
- },
- {
- component: 'ZOOKEEPER_SERVER',
- hostName: 'h0'
- },
- {
- component: 'ZOOKEEPER_SERVER',
- hostName: 'h1'
- },
- {
- component: 'OOZIE_SERVER',
- hostName: 'h0'
- },
- {
- component: 'OOZIE_SERVER',
- hostName: 'h1'
- },
- {
- component: 'NIMBUS',
- hostName: 'h2'
- },
- {
- component: 'FALCON_SERVER',
- hostName: 'h3'
- },
- {
- component: 'KAFKA_BROKER',
- hostName: 'h0'
- },
- {
- component: 'KAFKA_BROKER',
- hostName: 'h1'
- }
- ],
- slaveComponentHosts: [
- {
- componentName: 'DATANODE',
- hosts: [
- {
- hostName: 'h0'
- },
- {
- hostName: 'h1'
- }
- ]
- },
- {
- componentName: 'TASKTRACKER',
- hosts: [
- {
- hostName: 'h0'
- },
- {
- hostName: 'h1'
- }
- ]
- },
- {
- componentName: 'NODEMANAGER',
- hosts: [
- {
- hostName: 'h0'
- },
- {
- hostName: 'h1'
- },
- {
- hostName: 'h4'
- }
- ]
- },
- {
- componentName: 'HBASE_REGIONSERVER',
- hosts: [
- {
- hostName: 'h0'
- },
- {
- hostName: 'h1'
- }
- ]
- },
- {
- componentName: 'SUPERVISOR',
- hosts: [
- {
- hostName: 'h0'
- },
- {
- hostName: 'h1'
- }
- ]
- }
- ],
- hosts: {
- h0: {
- disk_info: [
- {
- mountpoint: '/'
- },
- {
- mountpoint: '/home'
- },
- {
- mountpoint: '/boot'
- },
- {
- mountpoint: '/boot/efi'
- },
- {
- mountpoint: '/mnt'
- },
- {
- mountpoint: '/mnt/efi'
- },
- {
- mountpoint: '/media/disk0',
- available: '100000000'
- },
- {
- mountpoint: '/mount0',
- available: '100000000'
- }
- ]
- },
- h4: {
- disk_info: [
- {
- mountpoint: 'c:',
- available: '100000000'
- }
- ]
- }
- }
- },
- cases = [
- {
- name: 'dfs.namenode.name.dir',
- isOnlyFirstOneNeeded: false,
- value: '/media/disk0/default\n/mount0/default\n'
- },
- {
- name: 'dfs.name.dir',
- isOnlyFirstOneNeeded: false,
- value: '/media/disk0/default\n/mount0/default\n'
- },
- {
- name: 'fs.checkpoint.dir',
- isOnlyFirstOneNeeded: true,
- value: 'file:///c:/default\n'
- },
- {
- name: 'dfs.namenode.checkpoint.dir',
- isOnlyFirstOneNeeded: true,
- value: 'file:///c:/default\n'
- },
- {
- name: 'dfs.data.dir',
- isOnlyFirstOneNeeded: false,
- value: '/media/disk0/default\n/mount0/default\n/media/disk1/default\n/mount1/default\n'
- },
- {
- name: 'dfs.datanode.data.dir',
- isOnlyFirstOneNeeded: false,
- value: '/media/disk0/default\n/mount0/default\n/media/disk1/default\n/mount1/default\n'
- },
- {
- name: 'mapred.local.dir',
- isOnlyFirstOneNeeded: false,
- value: '/media/disk0/default\n/mount0/default\n/media/disk1/default\n/mount1/default\n'
- },
- {
- name: 'yarn.nodemanager.log-dirs',
- isOnlyFirstOneNeeded: false,
- value: '/media/disk0/default\n/mount0/default\n/media/disk1/default\n/mount1/default\nc:\\default\n'
- },
- {
- name: 'yarn.nodemanager.local-dirs',
- isOnlyFirstOneNeeded: false,
- value: '/media/disk0/default\n/mount0/default\n/media/disk1/default\n/mount1/default\nc:\\default\n'
- },
- {
- name: 'yarn.timeline-service.leveldb-timeline-store.path',
- isOnlyFirstOneNeeded: true,
- value: '/media/disk0/default'
- },
- {
- name: 'yarn.timeline-service.leveldb-state-store.path',
- isOnlyFirstOneNeeded: true,
- value: '/media/disk0/default'
- },
- {
- name: 'dataDir',
- isOnlyFirstOneNeeded: true,
- value: '/media/disk0/default'
- },
- {
- name: 'oozie_data_dir',
- isOnlyFirstOneNeeded: true,
- value: '/media/disk0/default'
- },
- {
- name: 'storm.local.dir',
- isOnlyFirstOneNeeded: true,
- value: '/media/disk0/default'
- },
- {
- name: '*.falcon.graph.storage.directory',
- isOnlyFirstOneNeeded: true,
- value: '/default'
- },
- {
- name: '*.falcon.graph.serialize.path',
- isOnlyFirstOneNeeded: true,
- value: '/default'
- },
- {
- name: 'log.dirs',
- isOnlyFirstOneNeeded: false,
- value: '/media/disk0/default\n/mount0/default\n/media/disk1/default\n/mount1/default\n'
- }
- ];
-
- beforeEach(function () {
- sinon.stub(App.Host, 'find').returns([
- Em.Object.create({
- id: 'h1',
- diskInfo: [
- {
- mountpoint: '/media/disk1',
- type: 'devtmpfs'
- },
- {
- mountpoint: '/media/disk1',
- type: 'tmpfs'
- },
- {
- mountpoint: '/media/disk1',
- type: 'vboxsf'
- },
- {
- mountpoint: '/media/disk1',
- type: 'CDFS'
- },
- {
- mountpoint: '/media/disk1',
- available: '0'
- },
- {
- mountpoint: '/media/disk1',
- available: '100000000'
- },
- {
- mountpoint: '/mount1',
- available: '100000000'
- }
- ]
- }),
- Em.Object.create({
- id: 'h2',
- diskInfo: [
- {
- mountpoint: '/'
- }
- ]
- }),
- Em.Object.create({
- id: 'h3',
- diskInfo: []
- })
- ]);
- });
-
- afterEach(function () {
- App.Host.find.restore();
- });
-
- cases.forEach(function (item) {
- it(item.name, function () {
- serviceConfigProperty.setProperties({
- name: item.name,
- recommendedValue: '/default'
- });
- App.ConfigInitializer.initialValue(serviceConfigProperty, localDB, {});
- expect(serviceConfigProperty.get('value')).to.equal(item.value);
- expect(serviceConfigProperty.get('recommendedValue')).to.equal(item.value);
- });
- });
-
- });
-
describe('initializerTypes', function () {
var types = App.ConfigInitializer.get('initializerTypes');
Em.keys(types).forEach(function(type) {
@@ -1257,155 +951,4 @@ describe('App.ConfigInitializer', function () {
});
- describe('#_filterMountPoint', function() {
- [
- {
- mPoint: {
- mountpoint: '/'
- },
- localDB: {},
- e: false
- },
- {
- mPoint: {
- mountpoint: '/home'
- },
- localDB: {},
- e: false
- },
- {
- mPoint: {
- mountpoint: '/etc/resolv.conf'
- },
- localDB: {},
- e: false
- },
- {
- mPoint: {
- mountpoint: '/etc/hostname'
- },
- localDB: {},
- e: false
- },
- {
- mPoint: {
- mountpoint: '/etc/hosts'
- },
- localDB: {},
- e: false
- },
- {
- mPoint: {
- mountpoint: '/boot'
- },
- localDB: {},
- e: false
- },
- {
- mPoint: {
- mountpoint: '/mnt'
- },
- localDB: {},
- e: false
- },
- {
- mPoint: {
- mountpoint: '/tmp'
- },
- localDB: {},
- e: false
- },
- {
- mPoint: {
- mountpoint: '/some-dir',
- type: 'devtmpfs'
- },
- localDB: {},
- e: false
- },
- {
- mPoint: {
- mountpoint: '/some-dir',
- type: 'tmpfs'
- },
- localDB: {},
- e: false
- },
- {
- mPoint: {
- mountpoint: '/some-dir',
- type: 'vboxsf'
- },
- localDB: {},
- e: false
- },
- {
- mPoint: {
- mountpoint: '/some-dir',
- type: 'CDFS'
- },
- localDB: {},
- e: false
- },
- {
- mPoint: {
- mountpoint: '/usr/hdp'
- },
- localDB: {},
- e: false
- },
- {
- mPoint: {
- mountpoint: '/usr/hdp/1'
- },
- localDB: {},
- e: false
- },
- {
- mPoint: {
- mountpoint: '/usr/hdp/current'
- },
- localDB: {},
- e: true
- },
- {
- mPoint: {
- mountpoint: '/usr/hdp/2.5'
- },
- localDB: {
- selectedStack: {
- repository_version: '2.5'
- }
- },
- e: true
- },
- {
- mPoint: {
- mountpoint: '/usr/hdp/2.5.0'
- },
- localDB: {
- selectedStack: {
- repository_version: '2.5'
- }
- },
- e: false
- },
- {
- mPoint: {
- mountpoint: '/normal/directory'
- },
- localDB: {
- selectedStack: {
- repository_version: '2.5'
- }
- },
- e: true
- }
- ].forEach(function(test) {
- it('mount point "{0}" should be {1}'.format(test.mPoint.mountpoint, test.e ? 'valid' : 'invalid'), function() {
- var fFn = App.ConfigInitializer._filterMountPoint(test.localDB);
- expect(fFn(test.mPoint)).to.be.equal(test.e);
- });
- });
- });
});
[2/2] ambari git commit: AMBARI-18713. use exclude list of mount
device types on docker containers (dgrinenko via dlysnichenko)
Posted by dm...@apache.org.
AMBARI-18713. use exclude list of mount device types on docker containers (dgrinenko via dlysnichenko)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ab6d5523
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ab6d5523
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ab6d5523
Branch: refs/heads/branch-2.5
Commit: ab6d552340bbe601209509556c5dead8ba7d14ae
Parents: 3a34213
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Thu Dec 1 17:38:40 2016 +0200
Committer: Lisnichenko Dmitro <dl...@hortonworks.com>
Committed: Thu Dec 1 17:38:40 2016 +0200
----------------------------------------------------------------------
.../src/main/resources/scripts/stack_advisor.py | 11 +-
.../HDP/2.0.6/configuration/cluster-env.xml | 10 +
.../stacks/HDP/2.0.6/services/stack_advisor.py | 47 +-
.../stacks/HDP/2.1/services/stack_advisor.py | 20 +-
.../stacks/HDP/2.2/services/stack_advisor.py | 7 +
.../src/main/resources/stacks/stack_advisor.py | 209 ++++++++-
.../stacks/2.0.6/common/test_stack_advisor.py | 16 +-
.../stacks/2.1/common/test_stack_advisor.py | 2 +
.../stacks/2.2/common/test_stack_advisor.py | 46 +-
.../test/python/stacks/test_stack_adviser.py | 239 ++++++++++
ambari-web/app/mixins.js | 1 -
.../app/utils/configs/config_initializer.js | 28 +-
.../mount_points_based_initializer_mixin.js | 340 --------------
ambari-web/test/utils/ajax/ajax_test.js | 9 +-
.../utils/configs/config_initializer_test.js | 457 -------------------
15 files changed, 576 insertions(+), 866 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab6d5523/ambari-server/src/main/resources/scripts/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/scripts/stack_advisor.py b/ambari-server/src/main/resources/scripts/stack_advisor.py
index 5926c39..abfab87 100755
--- a/ambari-server/src/main/resources/scripts/stack_advisor.py
+++ b/ambari-server/src/main/resources/scripts/stack_advisor.py
@@ -70,13 +70,11 @@ def main(argv=None):
if len(args) < 3:
sys.stderr.write(USAGE)
sys.exit(2)
- pass
action = args[0]
if action not in ALL_ACTIONS:
sys.stderr.write(USAGE)
sys.exit(2)
- pass
hostsFile = args[1]
servicesFile = args[2]
@@ -89,6 +87,7 @@ def main(argv=None):
stackName = services["Versions"]["stack_name"]
stackVersion = services["Versions"]["stack_version"]
parentVersions = []
+
if "stack_hierarchy" in services["Versions"]:
parentVersions = services["Versions"]["stack_hierarchy"]["stack_versions"]
@@ -96,8 +95,9 @@ def main(argv=None):
# Perform action
actionDir = os.path.realpath(os.path.dirname(args[1]))
- result = {}
- result_file = "non_valid_result_file.json"
+
+ # filter
+ hosts = stackAdvisor.filterHostMounts(hosts, services)
if action == RECOMMEND_COMPONENT_LAYOUT_ACTION:
result = stackAdvisor.recommendComponentLayout(services, hosts)
@@ -111,12 +111,11 @@ def main(argv=None):
elif action == RECOMMEND_CONFIGURATION_DEPENDENCIES:
result = stackAdvisor.recommendConfigurationDependencies(services, hosts)
result_file = os.path.join(actionDir, "configurations.json")
- else: # action == VALIDATE_CONFIGURATIONS
+ else: # action == VALIDATE_CONFIGURATIONS
result = stackAdvisor.validateConfigurations(services, hosts)
result_file = os.path.join(actionDir, "configurations-validation.json")
dumpJson(result, result_file)
- pass
def instantiateStackAdvisor(stackName, stackVersion, parentVersions):
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab6d5523/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
index cc6c8a3..93680bf 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
@@ -280,4 +280,14 @@ gpgcheck=0</value>
<description>YARN Memory widget should be hidden by default on the dashboard.</description>
<on-ambari-upgrade add="true"/>
</property>
+ <property>
+ <name>agent_mounts_ignore_list</name>
+ <value/>
+ <description>Comma separated list of the mounts which would be ignored by Ambari during property values suggestion by Stack Advisor</description>
+ <on-ambari-upgrade add="false"/>
+ <value-attributes>
+ <visible>true</visible>
+ <empty-value-valid>true</empty-value-valid>
+ </value-attributes>
+ </property>
</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab6d5523/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
index 0c74c02..e47743e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
@@ -100,9 +100,23 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
"HBASE": self.recommendHbaseConfigurations,
"STORM": self.recommendStormConfigurations,
"AMBARI_METRICS": self.recommendAmsConfigurations,
- "RANGER": self.recommendRangerConfigurations
+ "RANGER": self.recommendRangerConfigurations,
+ "ZOOKEEPER": self.recommendZookeeperConfigurations,
+ "OOZIE": self.recommendOozieConfigurations
}
+ def recommendOozieConfigurations(self, configurations, clusterData, services, hosts):
+ oozie_mount_properties = [
+ ("oozie_data_dir", "OOZIE_SERVER", "/hadoop/oozie/data", "single"),
+ ]
+ self.updateMountProperties("oozie-env", oozie_mount_properties, configurations, services, hosts)
+
+ def recommendZookeeperConfigurations(self, configurations, clusterData, services, hosts):
+ zk_mount_properties = [
+ ("dataDir", "ZOOKEEPER_SERVER", "/hadoop/zookeeper", "single"),
+ ]
+ self.updateMountProperties("zoo.cfg", zk_mount_properties, configurations, services, hosts)
+
def recommendYARNConfigurations(self, configurations, clusterData, services, hosts):
putYarnProperty = self.putProperty(configurations, "yarn-site", services)
putYarnPropertyAttribute = self.putPropertyAttribute(configurations, "yarn-site")
@@ -115,6 +129,15 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
putYarnProperty('yarn.scheduler.maximum-allocation-mb', int(configurations["yarn-site"]["properties"]["yarn.nodemanager.resource.memory-mb"]))
putYarnEnvProperty('min_user_id', self.get_system_min_uid())
+ yarn_mount_properties = [
+ ("yarn.nodemanager.local-dirs", "NODEMANAGER", "/hadoop/yarn/local", "multi"),
+ ("yarn.nodemanager.log-dirs", "NODEMANAGER", "/hadoop/yarn/log", "multi"),
+ ("yarn.timeline-service.leveldb-timeline-store.path", "APP_TIMELINE_SERVER", "/hadoop/yarn/timeline", "single"),
+ ("yarn.timeline-service.leveldb-state-store.path", "APP_TIMELINE_SERVER", "/hadoop/yarn/timeline", "single")
+ ]
+
+ self.updateMountProperties("yarn-site", yarn_mount_properties, configurations, services, hosts)
+
sc_queue_name = self.recommendYarnQueue(services, "yarn-env", "service_check.queue.name")
if sc_queue_name is not None:
putYarnEnvProperty("service_check.queue.name", sc_queue_name)
@@ -145,6 +168,13 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
putMapredProperty('mapreduce.map.java.opts', "-Xmx" + str(int(round(0.8 * clusterData['mapMemory']))) + "m")
putMapredProperty('mapreduce.reduce.java.opts', "-Xmx" + str(int(round(0.8 * clusterData['reduceMemory']))) + "m")
putMapredProperty('mapreduce.task.io.sort.mb', min(int(round(0.4 * clusterData['mapMemory'])), 1024))
+
+ mapred_mounts = [
+ ("mapred.local.dir", ["TASKTRACKER", "NODEMANAGER"], "/hadoop/mapred", "multi")
+ ]
+
+ self.updateMountProperties("mapred-site", mapred_mounts, configurations, services, hosts)
+
mr_queue = self.recommendYarnQueue(services, "mapred-site", "mapreduce.job.queuename")
if mr_queue is not None:
putMapredProperty("mapreduce.job.queuename", mr_queue)
@@ -341,12 +371,15 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
if len(namenodes.split(',')) > 1:
putHDFSSitePropertyAttributes("dfs.namenode.rpc-address", "delete", "true")
- #Initialize default 'dfs.datanode.data.dir' if needed
- if (not hdfsSiteProperties) or ('dfs.datanode.data.dir' not in hdfsSiteProperties):
- dataDirs = '/hadoop/hdfs/data'
- putHDFSSiteProperty('dfs.datanode.data.dir', dataDirs)
- else:
- dataDirs = hdfsSiteProperties['dfs.datanode.data.dir'].split(",")
+ hdfs_mount_properties = [
+ ("dfs.datanode.data.dir", "DATANODE", "/hadoop/hdfs/data", "multi"),
+ ("dfs.namenode.name.dir", "DATANODE", "/hadoop/hdfs/namenode", "multi"),
+ ("dfs.namenode.checkpoint.dir", "SECONDARY_NAMENODE", "/hadoop/hdfs/namesecondary", "single")
+ ]
+
+ self.updateMountProperties("hdfs-site", hdfs_mount_properties, configurations, services, hosts)
+
+ dataDirs = hdfsSiteProperties['dfs.datanode.data.dir'].split(",")
# dfs.datanode.du.reserved should be set to 10-15% of volume size
# For each host selects maximum size of the volume. Then gets minimum for all hosts.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab6d5523/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
index 9678dc1..17225d0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1/services/stack_advisor.py
@@ -24,12 +24,30 @@ class HDP21StackAdvisor(HDP206StackAdvisor):
childRecommendConfDict = {
"OOZIE": self.recommendOozieConfigurations,
"HIVE": self.recommendHiveConfigurations,
- "TEZ": self.recommendTezConfigurations
+ "TEZ": self.recommendTezConfigurations,
+ "STORM": self.recommendStormConfigurations,
+ "FALCON": self.recommendFalconConfigurations
}
parentRecommendConfDict.update(childRecommendConfDict)
return parentRecommendConfDict
+ def recommendStormConfigurations(self, configurations, clusterData, services, hosts):
+ storm_mounts = [
+ ("storm.local.dir", ["NODEMANAGER", "NIMBUS"], "/hadoop/storm", "single")
+ ]
+
+ self.updateMountProperties("storm-site", storm_mounts, configurations, services, hosts)
+
+ def recommendFalconConfigurations(self, configurations, clusterData, services, hosts):
+ falcon_mounts = [
+ ("*.falcon.graph.storage.directory", "FALCON_SERVER", "/hadoop/falcon/data/lineage/graphdb", "single")
+ ]
+
+ self.updateMountProperties("falcon-startup.properties", falcon_mounts, configurations, services, hosts)
+
def recommendOozieConfigurations(self, configurations, clusterData, services, hosts):
+ super(HDP21StackAdvisor, self).recommendOozieConfigurations(configurations, clusterData, services, hosts)
+
oozieSiteProperties = getSiteProperties(services['configurations'], 'oozie-site')
oozieEnvProperties = getSiteProperties(services['configurations'], 'oozie-env')
putOozieProperty = self.putProperty(configurations, "oozie-site", services)
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab6d5523/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
index 4854514..4f0a9d0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
@@ -44,10 +44,17 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
"RANGER": self.recommendRangerConfigurations,
"LOGSEARCH" : self.recommendLogsearchConfigurations,
"SPARK": self.recommendSparkConfigurations,
+ "KAFKA": self.recommendKafkaConfigurations,
}
parentRecommendConfDict.update(childRecommendConfDict)
return parentRecommendConfDict
+ def recommendKafkaConfigurations(self, configurations, clusterData, services, hosts):
+ kafka_mounts = [
+ ("log.dirs", "KAFKA_BROKER", "/kafka-logs", "multi")
+ ]
+
+ self.updateMountProperties("kafka-broker", kafka_mounts, configurations, services, hosts)
def recommendSparkConfigurations(self, configurations, clusterData, services, hosts):
"""
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab6d5523/ambari-server/src/main/resources/stacks/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/stack_advisor.py b/ambari-server/src/main/resources/stacks/stack_advisor.py
index f6191f8..8148379 100644
--- a/ambari-server/src/main/resources/stacks/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/stack_advisor.py
@@ -972,6 +972,18 @@ class DefaultStackAdvisor(StackAdvisor):
return None
return siteConfig.get("properties")
+ def getServicesSiteProperties(self, services, siteName):
+ if not services:
+ return None
+
+ configurations = services.get("configurations")
+ if not configurations:
+ return None
+ siteConfig = configurations.get(siteName)
+ if siteConfig is None:
+ return None
+ return siteConfig.get("properties")
+
def putProperty(self, config, configType, services=None):
userConfigs = {}
changedConfigs = []
@@ -1040,14 +1052,27 @@ class DefaultStackAdvisor(StackAdvisor):
config[configType]["property_attributes"][key][attribute] = attributeValue if isinstance(attributeValue, list) else str(attributeValue)
return appendPropertyAttribute
-
- """
- Returns the hosts which are running the given component.
- """
def getHosts(self, componentsList, componentName):
+ """
+ Returns the hosts which are running the given component.
+ """
hostNamesList = [component["hostnames"] for component in componentsList if component["component_name"] == componentName]
return hostNamesList[0] if len(hostNamesList) > 0 else []
+ def getMountPoints(self, hosts):
+ """
+ Return list of mounts available on the hosts
+
+ :type hosts dict
+ """
+ mount_points = []
+
+ for item in hosts["items"]:
+ if "disk_info" in item["Hosts"]:
+ mount_points.append(item["Hosts"]["disk_info"])
+
+ return mount_points
+
def isSecurityEnabled(self, services):
"""
Determines if security is enabled by testing the value of cluster-env/security enabled.
@@ -1084,3 +1109,179 @@ class DefaultStackAdvisor(StackAdvisor):
def getServiceNames(self, services):
return [service["StackServices"]["service_name"] for service in services["services"]]
+
+ def filterHostMounts(self, hosts, services):
+ """
+ Filter mounts on the host using agent_mounts_ignore_list, by excluding any record whose mount-point
+ is mentioned in agent_mounts_ignore_list.
+
+ This function updates hosts dictionary
+
+ Example:
+
+ agent_mounts_ignore_list : "/run/secrets"
+
+ Hosts record :
+
+ "disk_info" : [
+ {
+ ...
+ "mountpoint" : "/"
+ },
+ {
+ ...
+ "mountpoint" : "/run/secrets"
+ }
+ ]
+
+ Result would be :
+
+ "disk_info" : [
+ {
+ ...
+ "mountpoint" : "/"
+ }
+ ]
+
+ :type hosts dict
+ :type services dict
+ """
+ if not services or "items" not in hosts:
+ return hosts
+
+ banned_filesystems = ["devtmpfs", "tmpfs", "vboxsf", "cdfs"]
+ banned_mount_points = ["/etc/resolv.conf", "/etc/hostname", "/boot", "/mnt", "/tmp", "/run/secrets"]
+
+ cluster_env = self.getServicesSiteProperties(services, "cluster-env")
+ ignore_list = []
+
+ if cluster_env and "agent_mounts_ignore_list" in cluster_env and cluster_env["agent_mounts_ignore_list"].strip():
+ ignore_list = [x.strip() for x in cluster_env["agent_mounts_ignore_list"].strip().split(",")]
+
+ ignore_list.extend(banned_mount_points)
+
+ for host in hosts["items"]:
+ if "Hosts" not in host and "disk_info" not in host["Hosts"]:
+ continue
+
+ host = host["Hosts"]
+ disk_info = []
+
+ for disk in host["disk_info"]:
+ if disk["mountpoint"] not in ignore_list\
+ and disk["type"].lower() not in banned_filesystems:
+ disk_info.append(disk)
+
+ host["disk_info"] = disk_info
+
+ return hosts
+
+ def __getSameHostMounts(self, hosts):
+ """
+ Return list of the mounts which are the same and present on all hosts
+
+ :type hosts dict
+ :rtype list
+ """
+ if not hosts:
+ return None
+
+ hostMounts = self.getMountPoints(hosts)
+ mounts = []
+ for m in hostMounts:
+ host_mounts = set([item["mountpoint"] for item in m])
+ mounts = host_mounts if not mounts else mounts & host_mounts
+
+ return sorted(mounts)
+
+ def getMountPathVariations(self, initial_value, component_name, services, hosts):
+ """
+ Recommends best fitted mount by prefixing path with it.
+
+ :return: list of paths prefixed with the selected mount points. If no recommendation is possible,
+ an empty list is returned
+
+ :type initial_value str
+ :type component_name str
+ :type services dict
+ :type hosts dict
+ :rtype list
+ """
+ available_mounts = []
+
+ if not initial_value:
+ return available_mounts
+
+ mounts = self.__getSameHostMounts(hosts)
+ sep = "/"
+
+ if not mounts:
+ return available_mounts
+
+ for mount in mounts:
+ new_mount = initial_value if mount == "/" else os.path.join(mount + sep, initial_value.lstrip(sep))
+ if new_mount not in available_mounts:
+ available_mounts.append(new_mount)
+
+ # no list transformations after filling the list, because this will cause item order change
+ return available_mounts
+
+ def getMountPathVariation(self, initial_value, component_name, services, hosts):
+ """
+ Recommends best fitted mount by prefixing path with it.
+
+ :return: list containing the single best-fitted path. If no recommendation is possible,
+ an empty list is returned
+
+ :type initial_value str
+ :type component_name str
+ :type services dict
+ :type hosts dict
+ :rtype str
+ """
+ try:
+ return [self.getMountPathVariations(initial_value, component_name, services, hosts)[0]]
+ except IndexError:
+ return []
+
+ def updateMountProperties(self, siteConfig, propertyDefinitions, configurations, services, hosts):
+ """
+ Update properties according to recommendations for available mount-points
+
+ propertyDefinitions is a list of tuples: (property name, component name, initial value, recommendation type)
+
+ Where,
+
+ property name - name of the property
+ component name - name of the component to which this property belongs
+ initial value - initial path
+ recommendation type - could be "multi" or "single". This describes recommendation strategy, to use only one disk
+ or use all available space on the host
+
+ :type propertyDefinitions list
+ :type siteConfig str
+ :type configurations dict
+ :type services dict
+ :type hosts dict
+ """
+
+ props = self.getServicesSiteProperties(services, siteConfig)
+ put_f = self.putProperty(configurations, siteConfig, services)
+
+ for prop_item in propertyDefinitions:
+ name, component, default_value, rc_type = prop_item
+ recommendation = None
+
+ if props is None or name not in props:
+ if rc_type == "multi":
+ recommendation = self.getMountPathVariations(default_value, component, services, hosts)
+ else:
+ recommendation = self.getMountPathVariation(default_value, component, services, hosts)
+ elif props and name in props and props[name] == default_value:
+ if rc_type == "multi":
+ recommendation = self.getMountPathVariations(default_value, component, services, hosts)
+ else:
+ recommendation = self.getMountPathVariation(default_value, component, services, hosts)
+
+ if recommendation:
+ put_f(name, ",".join(recommendation))
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab6d5523/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
index 4c6c2a3..4fa9e02 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
@@ -1195,8 +1195,10 @@ class TestHDP206StackAdvisor(TestCase):
{'properties':
{'falcon_user': 'falcon'}},
'hdfs-site':
- {'properties':
+ {'properties':
{'dfs.datanode.data.dir': '/hadoop/hdfs/data',
+ 'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
+ 'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary',
'dfs.datanode.du.reserved': '10240000000'}},
'hive-env':
{'properties':
@@ -1330,6 +1332,8 @@ class TestHDP206StackAdvisor(TestCase):
'hdfs-site':
{'properties':
{'dfs.datanode.data.dir': '/hadoop/hdfs/data',
+ 'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
+ 'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary',
'dfs.datanode.du.reserved': '10240000000'}},
'hive-env':
{'properties':
@@ -1463,8 +1467,10 @@ class TestHDP206StackAdvisor(TestCase):
{'hive_user': 'hive',
'webhcat_user': 'webhcat'}},
'hdfs-site':
- {'properties':
+ {'properties':
{'dfs.datanode.data.dir': '/hadoop/hdfs/data',
+ 'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
+ 'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary',
'dfs.datanode.du.reserved': '10240000000'}},
'hadoop-env':
{'properties':
@@ -1484,10 +1490,12 @@ class TestHDP206StackAdvisor(TestCase):
expected["hdfs-site"] = {
'properties': {
- 'dfs.datanode.data.dir': '/hadoop/hdfs/data',
'dfs.datanode.du.reserved': '10240000000',
'dfs.internal.nameservices': 'mycluster',
- 'dfs.ha.namenodes.mycluster': 'nn1,nn2'
+ 'dfs.ha.namenodes.mycluster': 'nn1,nn2',
+ 'dfs.datanode.data.dir': '/hadoop/hdfs/data',
+ 'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
+ 'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary',
},
'property_attributes': {
'dfs.namenode.rpc-address': {
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab6d5523/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
index 7835262..f9fb1f5 100644
--- a/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.1/common/test_stack_advisor.py
@@ -487,6 +487,8 @@ class TestHDP21StackAdvisor(TestCase):
"hdfs-site": {
"properties": {
'dfs.datanode.data.dir': '/hadoop/hdfs/data',
+ 'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
+ 'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary',
'dfs.datanode.du.reserved': '10240000000'
}
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab6d5523/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
index dce32d3..9d8ba08 100644
--- a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
@@ -3068,7 +3068,9 @@ class TestHDP22StackAdvisor(TestCase):
'dfs.namenode.safemode.threshold-pct': '1.000',
'dfs.datanode.failed.volumes.tolerated': '1',
'dfs.namenode.handler.count': '25',
- 'dfs.datanode.data.dir': '/path/1,/path/2,/path/3,/path/4'
+ 'dfs.datanode.data.dir': '/path/1,/path/2,/path/3,/path/4',
+ 'dfs.namenode.name.dir': '/hadoop/hdfs/namenode',
+ 'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary'
},
'property_attributes': {
'dfs.datanode.failed.volumes.tolerated': {'maximum': '4'},
@@ -3681,12 +3683,9 @@ class TestHDP22StackAdvisor(TestCase):
"cpu_count" : 6,
"total_mem" : 50331648,
"disk_info" : [
- {"mountpoint" : "/"},
- {"mountpoint" : "/dev/shm"},
- {"mountpoint" : "/vagrant"},
- {"mountpoint" : "/"},
- {"mountpoint" : "/dev/shm"},
- {"mountpoint" : "/vagrant"}
+ {"mountpoint" : "/", "type": "ext3"},
+ {"mountpoint" : "/dev/shm", "type": "tmpfs"},
+ {"mountpoint" : "/vagrant", "type": "vboxsf"}
],
"public_host_name" : "c6401.ambari.apache.org",
"host_name" : "c6401.ambari.apache.org"
@@ -3736,15 +3735,19 @@ class TestHDP22StackAdvisor(TestCase):
"yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor",
"yarn.nodemanager.linux-container-executor.cgroups.mount-path": "/cgroup",
"yarn.nodemanager.container-executor.cgroups.mount": "true",
- "yarn.nodemanager.resource.memory-mb": "39424",
- "yarn.scheduler.minimum-allocation-mb": "3584",
+ "yarn.nodemanager.resource.memory-mb": "43008",
+ "yarn.scheduler.minimum-allocation-mb": "14336",
"yarn.scheduler.maximum-allocation-vcores": "4",
"yarn.scheduler.minimum-allocation-vcores": "1",
"yarn.nodemanager.resource.cpu-vcores": "4",
"yarn.nodemanager.container-executor.cgroups.hierarchy": " /yarn",
- "yarn.scheduler.maximum-allocation-mb": "39424",
+ "yarn.scheduler.maximum-allocation-mb": "43008",
"yarn.nodemanager.container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler",
- "hadoop.registry.rm.enabled": "false"
+ "hadoop.registry.rm.enabled": "false",
+ "yarn.timeline-service.leveldb-state-store.path": "/hadoop/yarn/timeline",
+ "yarn.timeline-service.leveldb-timeline-store.path": "/hadoop/yarn/timeline",
+ "yarn.nodemanager.local-dirs": "/hadoop/yarn/local",
+ "yarn.nodemanager.log-dirs": "/hadoop/yarn/log"
},
"property_attributes": {
"yarn.scheduler.minimum-allocation-vcores": {
@@ -3757,18 +3760,19 @@ class TestHDP22StackAdvisor(TestCase):
"maximum": "49152"
},
"yarn.scheduler.minimum-allocation-mb": {
- "maximum": "39424"
+ "maximum": "43008"
},
"yarn.nodemanager.resource.cpu-vcores": {
"maximum": "12"
},
"yarn.scheduler.maximum-allocation-mb": {
- "maximum": "39424"
+ "maximum": "43008"
}
}
}
}
+ hosts = self.stackAdvisor.filterHostMounts(hosts, services)
clusterData = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, None)
self.assertEquals(clusterData['hbaseRam'], 8)
@@ -3792,15 +3796,19 @@ class TestHDP22StackAdvisor(TestCase):
"yarn.nodemanager.linux-container-executor.cgroups.mount-path": "/cgroup",
"yarn.nodemanager.linux-container-executor.group": "hadoop",
"yarn.nodemanager.container-executor.cgroups.mount": "true",
- "yarn.nodemanager.resource.memory-mb": "39424",
- "yarn.scheduler.minimum-allocation-mb": "3584",
+ "yarn.nodemanager.resource.memory-mb": "43008",
+ "yarn.scheduler.minimum-allocation-mb": "14336",
"yarn.scheduler.maximum-allocation-vcores": "4",
"yarn.scheduler.minimum-allocation-vcores": "1",
"yarn.nodemanager.resource.cpu-vcores": "4",
"yarn.nodemanager.container-executor.cgroups.hierarchy": " /yarn",
- "yarn.scheduler.maximum-allocation-mb": "39424",
+ "yarn.scheduler.maximum-allocation-mb": "43008",
"yarn.nodemanager.container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler",
- "hadoop.registry.rm.enabled": "false"
+ "hadoop.registry.rm.enabled": "false",
+ "yarn.timeline-service.leveldb-state-store.path": "/hadoop/yarn/timeline",
+ "yarn.timeline-service.leveldb-timeline-store.path": "/hadoop/yarn/timeline",
+ "yarn.nodemanager.local-dirs": "/hadoop/yarn/local",
+ "yarn.nodemanager.log-dirs": "/hadoop/yarn/log"
},
"property_attributes": {
"yarn.nodemanager.container-executor.cgroups.mount": {
@@ -3822,13 +3830,13 @@ class TestHDP22StackAdvisor(TestCase):
"maximum": "49152"
},
"yarn.scheduler.minimum-allocation-mb": {
- "maximum": "39424"
+ "maximum": "43008"
},
"yarn.nodemanager.resource.cpu-vcores": {
"maximum": "12"
},
"yarn.scheduler.maximum-allocation-mb": {
- "maximum": "39424"
+ "maximum": "43008"
},
"yarn.nodemanager.container-executor.resources-handler.class": {
"delete": "true"
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab6d5523/ambari-server/src/test/python/stacks/test_stack_adviser.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/test_stack_adviser.py b/ambari-server/src/test/python/stacks/test_stack_adviser.py
new file mode 100644
index 0000000..8146a0c
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/test_stack_adviser.py
@@ -0,0 +1,239 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import os
+from unittest import TestCase
+
+
+class TestBasicAdvisor(TestCase):
+ def setUp(self):
+ import imp
+ self.maxDiff = None
+ self.testDirectory = os.path.dirname(os.path.abspath(__file__))
+ stackAdvisorPath = os.path.abspath(os.path.join(self.testDirectory, '../../../main/resources/stacks/stack_advisor.py'))
+
+ default_sa_classname = 'DefaultStackAdvisor'
+
+ with open(stackAdvisorPath, 'rb') as fp:
+ stack_advisor_impl = imp.load_module('stack_advisor', fp, stackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
+
+ clazz = getattr(stack_advisor_impl, default_sa_classname)
+ self.stackAdvisor = clazz()
+
+ def test_filterHostMounts(self):
+
+ filtered_mount = "/data"
+
+ hosts = {
+ "items": [
+ {
+ "Hosts": {
+ "cpu_count": 4,
+ "total_mem": 50331648,
+ "disk_info": [
+ {"mountpoint": "/", "type": "ext3"},
+ {"mountpoint": "/dev/shm", "type": "ext3"},
+ {"mountpoint": "/vagrant", "type": "vboxsf"},
+ {"mountpoint": "/dev/shm", "type": "ext3"},
+ {"mountpoint": "/vagrant", "type": "ext3"},
+ {"mountpoint": filtered_mount, "type": "ext3"},
+ ],
+ "public_host_name": "c6401.ambari.apache.org",
+ "host_name": "c6401.ambari.apache.org"
+ },
+ },
+ {
+ "Hosts": {
+ "cpu_count": 4,
+ "total_mem": 50331648,
+ "disk_info": [
+ {"mountpoint": "/", "type": "ext3"},
+ {"mountpoint": "/dev/shm1", "type": "ext3"},
+ {"mountpoint": "/vagrant1", "type": "ext3"},
+ {"mountpoint": filtered_mount, "type": "ext3"}
+ ],
+ "public_host_name": "c6402.ambari.apache.org",
+ "host_name": "c6402.ambari.apache.org"
+ },
+ }
+ ]
+ }
+
+ services = {
+ "Versions": {
+ "parent_stack_version": "2.5",
+ "stack_name": "HDP",
+ "stack_version": "2.6",
+ "stack_hierarchy": {
+ "stack_name": "HDP",
+ "stack_versions": ["2.5", "2.4", "2.3", "2.2", "2.1", "2.0.6"]
+ }
+ },
+ "services": [
+ ],
+ "configurations": {
+ "cluster-env": {
+ "properties": {
+ "agent_mounts_ignore_list": filtered_mount
+ }
+ }
+ }
+ }
+
+ filtered_hosts = self.stackAdvisor.filterHostMounts(hosts, services)
+
+ for host in filtered_hosts["items"]:
+ self.assertEquals(False, filtered_mount in host["Hosts"]["disk_info"])
+
+ def test_getMountPathVariations(self):
+
+ filtered_mount = "/data"
+
+ hosts = {
+ "items": [
+ {
+ "Hosts": {
+ "cpu_count": 4,
+ "total_mem": 50331648,
+ "disk_info": [
+ {"mountpoint": "/", "type": "ext3"},
+ {"mountpoint": "/dev/shm", "type": "ext3"},
+ {"mountpoint": "/vagrant", "type": "vboxsf"},
+ {"mountpoint": "/dev/shm", "type": "ext3"},
+ {"mountpoint": "/vagrant", "type": "ext3"},
+ {"mountpoint": filtered_mount, "type": "ext3"},
+ ],
+ "public_host_name": "c6401.ambari.apache.org",
+ "host_name": "c6401.ambari.apache.org"
+ },
+ },
+ {
+ "Hosts": {
+ "cpu_count": 4,
+ "total_mem": 50331648,
+ "disk_info": [
+ {"mountpoint": "/", "type": "ext3"},
+ {"mountpoint": "/dev/shm1", "type": "ext3"},
+ {"mountpoint": "/vagrant1", "type": "ext3"},
+ {"mountpoint": filtered_mount, "type": "ext3"}
+ ],
+ "public_host_name": "c6402.ambari.apache.org",
+ "host_name": "c6402.ambari.apache.org"
+ },
+ }
+ ]
+ }
+
+ services = {
+ "Versions": {
+ "parent_stack_version": "2.5",
+ "stack_name": "HDP",
+ "stack_version": "2.6",
+ "stack_hierarchy": {
+ "stack_name": "HDP",
+ "stack_versions": ["2.5", "2.4", "2.3", "2.2", "2.1", "2.0.6"]
+ }
+ },
+ "services": [
+ ],
+ "configurations": {
+ "cluster-env": {
+ "properties": {
+ "agent_mounts_ignore_list": filtered_mount
+ }
+ }
+ }
+ }
+
+ hosts = self.stackAdvisor.filterHostMounts(hosts, services)
+ avail_mounts = self.stackAdvisor.getMountPathVariations("/test/folder", "DATANODE", services, hosts)
+
+ self.assertEquals(True, avail_mounts is not None)
+ self.assertEquals(1, len(avail_mounts))
+ self.assertEquals("/test/folder", avail_mounts[0])
+
+ def test_updateMountProperties(self):
+ hosts = {
+ "items": [
+ {
+ "Hosts": {
+ "cpu_count": 4,
+ "total_mem": 50331648,
+ "disk_info": [
+ {"mountpoint": "/", "type": "ext3"},
+ {"mountpoint": "/dev/shm", "type": "ext3"},
+ {"mountpoint": "/vagrant", "type": "vboxsf"},
+ {"mountpoint": "/dev/shm", "type": "ext3"},
+ {"mountpoint": "/vagrant", "type": "ext3"},
+ {"mountpoint": "/data", "type": "ext3"},
+ ],
+ "public_host_name": "c6401.ambari.apache.org",
+ "host_name": "c6401.ambari.apache.org"
+ },
+ },
+ {
+ "Hosts": {
+ "cpu_count": 4,
+ "total_mem": 50331648,
+ "disk_info": [
+ {"mountpoint": "/", "type": "ext3"},
+ {"mountpoint": "/dev/shm1", "type": "ext3"},
+ {"mountpoint": "/vagrant", "type": "vboxsf"},
+ {"mountpoint": "/data", "type": "ext3"}
+ ],
+ "public_host_name": "c6402.ambari.apache.org",
+ "host_name": "c6402.ambari.apache.org"
+ },
+ }
+ ]
+ }
+
+ services = {
+ "Versions": {
+ "parent_stack_version": "2.5",
+ "stack_name": "HDP",
+ "stack_version": "2.6",
+ "stack_hierarchy": {
+ "stack_name": "HDP",
+ "stack_versions": ["2.5", "2.4", "2.3", "2.2", "2.1", "2.0.6"]
+ }
+ },
+ "services": [
+ ],
+ "configurations": {
+ "cluster-env": {
+ "properties": {
+ "agent_mounts_ignore_list": ""
+ }
+ },
+ "some-site": {
+ "path_prop": "/test"
+ }
+ }
+ }
+
+ pathProperties = [
+ ("path_prop", "DATANODE", "/test", "multi"),
+ ]
+
+ configurations = {}
+ hosts = self.stackAdvisor.filterHostMounts(hosts, services)
+
+ self.stackAdvisor.updateMountProperties("some-site", pathProperties, configurations, services, hosts)
+
+ self.assertEquals("/test,/data/test", configurations["some-site"]["properties"]["path_prop"])
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab6d5523/ambari-web/app/mixins.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins.js b/ambari-web/app/mixins.js
index d3ccbbe..ad682d7 100644
--- a/ambari-web/app/mixins.js
+++ b/ambari-web/app/mixins.js
@@ -66,4 +66,3 @@ require('mixins/common/widgets/widget_mixin');
require('mixins/common/widgets/widget_section');
require('mixins/unit_convert/base_unit_convert_mixin');
require('mixins/unit_convert/convert_unit_widget_view_mixin');
-require('utils/configs/mount_points_based_initializer_mixin');
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab6d5523/ambari-web/app/utils/configs/config_initializer.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/configs/config_initializer.js b/ambari-web/app/utils/configs/config_initializer.js
index cb5b41f..de9ca7e 100644
--- a/ambari-web/app/utils/configs/config_initializer.js
+++ b/ambari-web/app/utils/configs/config_initializer.js
@@ -20,7 +20,6 @@ var App = require('app');
var stringUtils = require('utils/string_utils');
require('utils/configs/config_initializer_class');
-require('utils/configs/mount_points_based_initializer_mixin');
require('utils/configs/hosts_based_initializer_mixin');
/**
@@ -53,7 +52,7 @@ function getZKBasedConfig() {
*
* @instance ConfigInitializer
*/
-App.ConfigInitializer = App.ConfigInitializerClass.create(App.MountPointsBasedInitializerMixin, App.HostsBasedInitializerMixin, {
+App.ConfigInitializer = App.ConfigInitializerClass.create(App.HostsBasedInitializerMixin, {
initializers: function() {
return {
@@ -111,26 +110,7 @@ App.ConfigInitializer = App.ConfigInitializerClass.create(App.MountPointsBasedIn
'templeton.zookeeper.hosts': getZKBasedConfig(),
'hadoop.registry.zk.quorum': getZKBasedConfig(),
'hive.cluster.delegation.token.store.zookeeper.connectString': getZKBasedConfig(),
- 'instance.zookeeper.host': getZKBasedConfig(),
-
- 'dfs.name.dir': this.getMultipleMountPointsConfig('NAMENODE', 'file'),
- 'dfs.namenode.name.dir': this.getMultipleMountPointsConfig('NAMENODE', 'file'),
- 'dfs.data.dir': this.getMultipleMountPointsConfig('DATANODE', 'file'),
- 'dfs.datanode.data.dir': this.getMultipleMountPointsConfig('DATANODE', 'file'),
- 'yarn.nodemanager.local-dirs': this.getMultipleMountPointsConfig('NODEMANAGER'),
- 'yarn.nodemanager.log-dirs': this.getMultipleMountPointsConfig('NODEMANAGER'),
- 'mapred.local.dir': this.getMultipleMountPointsConfig(['TASKTRACKER', 'NODEMANAGER']),
- 'log.dirs': this.getMultipleMountPointsConfig('KAFKA_BROKER'),
-
- 'fs.checkpoint.dir': this.getSingleMountPointConfig('SECONDARY_NAMENODE', 'file'),
- 'dfs.namenode.checkpoint.dir': this.getSingleMountPointConfig('SECONDARY_NAMENODE', 'file'),
- 'yarn.timeline-service.leveldb-timeline-store.path': this.getSingleMountPointConfig('APP_TIMELINE_SERVER'),
- 'yarn.timeline-service.leveldb-state-store.path': this.getSingleMountPointConfig('APP_TIMELINE_SERVER'),
- 'dataDir': this.getSingleMountPointConfig('ZOOKEEPER_SERVER'),
- 'oozie_data_dir': this.getSingleMountPointConfig('OOZIE_SERVER'),
- 'storm.local.dir': this.getSingleMountPointConfig(['NODEMANAGER', 'NIMBUS']),
- '*.falcon.graph.storage.directory': this.getSingleMountPointConfig('FALCON_SERVER'),
- '*.falcon.graph.serialize.path': this.getSingleMountPointConfig('FALCON_SERVER')
+ 'instance.zookeeper.host': getZKBasedConfig()
}
}.property(''),
@@ -146,9 +126,7 @@ App.ConfigInitializer = App.ConfigInitializerClass.create(App.MountPointsBasedIn
},
initializerTypes: [
- {name: 'zookeeper_based', method: '_initAsZookeeperServersList'},
- {name: 'single_mountpoint', method: '_initAsSingleMountPoint'},
- {name: 'multiple_mountpoints', method: '_initAsMultipleMountPoints'}
+ {name: 'zookeeper_based', method: '_initAsZookeeperServersList'}
],
/**
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab6d5523/ambari-web/app/utils/configs/mount_points_based_initializer_mixin.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/configs/mount_points_based_initializer_mixin.js b/ambari-web/app/utils/configs/mount_points_based_initializer_mixin.js
deleted file mode 100644
index 59a3985..0000000
--- a/ambari-web/app/utils/configs/mount_points_based_initializer_mixin.js
+++ /dev/null
@@ -1,340 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-var App = require('app');
-
-/**
- * Regexp used to determine if mount point is windows-like
- *
- * @type {RegExp}
- */
-var winRegex = /^([a-z]):\\?$/;
-
-App.MountPointsBasedInitializerMixin = Em.Mixin.create({
-
- /**
- * Map for methods used as value-modifiers for configProperties with values as mount point(s)
- * Used if mount point is win-like (@see winRegex)
- * Key: id
- * Value: method-name
- *
- * @type {{default: string, file: string, slashes: string}}
- */
- winReplacersMap: {
- default: '_defaultWinReplace',
- file: '_winReplaceWithFile',
- slashes: '_defaultWinReplaceWithAdditionalSlashes'
- },
-
- /**
- * Initializer for configs with value as one of the possible mount points
- * Only hosts that contains on the components from <code>initializer.components</code> are processed
- * Hosts with Windows needs additional processing (@see winReplacersMap)
- * Value example: '/', '/some/cool/dir'
- *
- * @param {configProperty} configProperty
- * @param {topologyLocalDB} localDB
- * @param {object} dependencies
- * @param {object} initializer
- * @return {Object}
- */
- _initAsSingleMountPoint: function (configProperty, localDB, dependencies, initializer) {
- var hostsInfo = this._updateHostInfo(localDB.hosts);
- var setOfHostNames = this._getSetOfHostNames(localDB, initializer);
- var winReplacersMap = this.get('winReplacersMap');
- // In Add Host Wizard, if we did not select this slave component for any host, then we don't process any further.
- if (!setOfHostNames.length) {
- return configProperty;
- }
- var allMountPoints = this._getAllMountPoints(setOfHostNames, hostsInfo, localDB);
-
- var mPoint = allMountPoints[0].mountpoint;
- if (mPoint === "/") {
- mPoint = Em.get(configProperty, 'recommendedValue');
- }
- else {
- var mp = mPoint.toLowerCase();
- if (winRegex.test(mp)) {
- var methodName = winReplacersMap[initializer.winReplacer];
- mPoint = this[methodName].call(this, configProperty, mp);
- }
- else {
- mPoint = mPoint + Em.get(configProperty, 'recommendedValue');
- }
- }
- Em.setProperties(configProperty, {
- value: mPoint,
- recommendedValue: mPoint
- });
-
- return configProperty;
- },
-
- /**
- * Initializer for configs with value as all of the possible mount points
- * Only hosts that contains on the components from <code>initializer.components</code> are processed
- * Hosts with Windows needs additional processing (@see winReplacersMap)
- * Value example: '/\n/some/cool/dir' (`\n` - is divider)
- *
- * @param {Object} configProperty
- * @param {topologyLocalDB} localDB
- * @param {object} dependencies
- * @param {object} initializer
- * @return {Object}
- */
- _initAsMultipleMountPoints: function (configProperty, localDB, dependencies, initializer) {
- var hostsInfo = this._updateHostInfo(localDB.hosts);
- var self = this;
- var setOfHostNames = this._getSetOfHostNames(localDB, initializer);
- var winReplacersMap = this.get('winReplacersMap');
- // In Add Host Wizard, if we did not select this slave component for any host, then we don't process any further.
- if (!setOfHostNames.length) {
- return configProperty;
- }
-
- var allMountPoints = this._getAllMountPoints(setOfHostNames, hostsInfo, localDB);
- var mPoint = '';
-
- allMountPoints.forEach(function (eachDrive) {
- if (eachDrive.mountpoint === '/') {
- mPoint += Em.get(configProperty, 'recommendedValue') + "\n";
- }
- else {
- var mp = eachDrive.mountpoint.toLowerCase();
- if (winRegex.test(mp)) {
- var methodName = winReplacersMap[initializer.winReplacer];
- mPoint += self[methodName].call(this, configProperty, mp);
- }
- else {
- mPoint += eachDrive.mountpoint + Em.get(configProperty, 'recommendedValue') + "\n";
- }
- }
- }, this);
-
- Em.setProperties(configProperty, {
- value: mPoint,
- recommendedValue: mPoint
- });
-
- return configProperty;
- },
-
- /**
- * Replace drive-based windows-path with 'file:///'
- *
- * @param {configProperty} configProperty
- * @param {string} mountPoint
- * @returns {string}
- * @private
- */
- _winReplaceWithFile: function (configProperty, mountPoint) {
- var winDriveUrl = mountPoint.toLowerCase().replace(winRegex, 'file:///$1:');
- return winDriveUrl + Em.get(configProperty, 'recommendedValue') + '\n';
- },
-
- /**
- * Replace drive-based windows-path
- *
- * @param {configProperty} configProperty
- * @param {string} mountPoint
- * @returns {string}
- * @private
- */
- _defaultWinReplace: function (configProperty, mountPoint) {
- var winDrive = mountPoint.toLowerCase().replace(winRegex, '$1:');
- var winDir = Em.get(configProperty, 'recommendedValue').replace(/\//g, '\\');
- return winDrive + winDir + '\n';
- },
-
- /**
- * Same to <code>_defaultWinReplace</code>, but with extra-slash in the end
- *
- * @param {configProperty} configProperty
- * @param {string} mountPoint
- * @returns {string}
- * @private
- */
- _defaultWinReplaceWithAdditionalSlashes: function (configProperty, mountPoint) {
- var winDrive = mountPoint.toLowerCase().replace(winRegex, '$1:');
- var winDir = Em.get(configProperty, 'recommendedValue').replace(/\//g, '\\\\');
- return winDrive + winDir + '\n';
- },
-
- /**
- * Update information from localDB using <code>App.Host</code>-model
- *
- * @param {object} hostsInfo
- * @returns {object}
- * @private
- */
- _updateHostInfo: function (hostsInfo) {
- App.Host.find().forEach(function (item) {
- if (!hostsInfo[item.get('id')]) {
- hostsInfo[item.get('id')] = {
- name: item.get('id'),
- cpu: item.get('cpu'),
- memory: item.get('memory'),
- disk_info: item.get('diskInfo'),
- bootStatus: "REGISTERED",
- isInstalled: true
- };
- }
- });
- return hostsInfo;
- },
-
- /**
- * Determines if mount point is valid
- * Criterias:
- * <ul>
- * <li>Should has available space</li>
- * <li>Should not be home-dir</li>
- * <li>Should not be docker-dir</li>
- * <li>Should not be boot-dir</li>
- * <li>Should not be dev-dir</li>
- * <li>Valid mount point started from /usr/hdp/ should be /usr/hdp/current
- * or /usr/hdp/<STACK_VERSION_NUMBER> e.g. /usr/hdp/2.5.0.0
- * </li>
- * </ul>
- *
- * @param {{mountpoint: string, available: number}} mPoint
- * @returns {function} true - valid, false - invalid
- * @private
- */
- _filterMountPoint: function (localDB) {
- var stackVersionNumber = [Em.getWithDefault(localDB.selectedStack || {}, 'repository_version', null)].compact();
- return function(mPoint) {
- var isAvailable = mPoint.available !== 0;
- if (!isAvailable) {
- return false;
- }
-
- var stackRoot = '/usr/hdp';
- var notHome = !['/', '/home'].contains(mPoint.mountpoint);
- var notDocker = !['/etc/resolv.conf', '/etc/hostname', '/etc/hosts'].contains(mPoint.mountpoint);
- var notBoot = mPoint.mountpoint && !(mPoint.mountpoint.startsWith('/boot')
- || mPoint.mountpoint.startsWith('/mnt')
- || mPoint.mountpoint.startsWith('/tmp'));
- var notDev = !(['devtmpfs', 'tmpfs', 'vboxsf', 'CDFS'].contains(mPoint.type));
- var validStackRootMount = !(mPoint.mountpoint.startsWith(stackRoot) && !['current'].concat(stackVersionNumber).filter(function(i) {
- return mPoint.mountpoint === stackRoot + '/' + i;
- }).length);
-
- return notHome && notDocker && notBoot && notDev && validStackRootMount;
- };
- },
-
- /**
- * Get list of hostNames from localDB which contains needed components
- *
- * @param {topologyLocalDB} localDB
- * @param {object} initializer
- * @returns {string[]}
- * @private
- */
- _getSetOfHostNames: function (localDB, initializer) {
- var masterComponentHostsInDB = Em.getWithDefault(localDB, 'masterComponentHosts', []);
- var slaveComponentHostsInDB = Em.getWithDefault(localDB, 'slaveComponentHosts', []);
- var hosts = masterComponentHostsInDB.filter(function (master) {
- return initializer.components.contains(master.component);
- }).mapProperty('hostName');
-
- var sHosts = slaveComponentHostsInDB.find(function (slave) {
- return initializer.components.contains(slave.componentName);
- });
- if (sHosts) {
- hosts = hosts.concat(sHosts.hosts.mapProperty('hostName'));
- }
- return hosts;
- },
-
- /**
- * Get list of all unique valid mount points for hosts
- *
- * @param {string[]} setOfHostNames
- * @param {object} hostsInfo
- * @param {topologyLocalDB} localDB
- * @returns {string[]}
- * @private
- */
- _getAllMountPoints: function (setOfHostNames, hostsInfo, localDB) {
- var allMountPoints = [],
- mountPointFilter = this._filterMountPoint(localDB);
- for (var i = 0; i < setOfHostNames.length; i++) {
- var hostname = setOfHostNames[i];
- var mountPointsPerHost = hostsInfo[hostname].disk_info;
- var mountPointAsRoot = mountPointsPerHost.findProperty('mountpoint', '/');
-
- // If Server does not send any host details information then atleast one mountpoint should be presumed as root
- // This happens in a single container Linux Docker environment.
- if (!mountPointAsRoot) {
- mountPointAsRoot = {
- mountpoint: '/'
- };
- }
-
- mountPointsPerHost.filter(mountPointFilter).forEach(function (mPoint) {
- if( !allMountPoints.findProperty("mountpoint", mPoint.mountpoint)) {
- allMountPoints.push(mPoint);
- }
- }, this);
- }
-
- if (!allMountPoints.length) {
- allMountPoints.push(mountPointAsRoot);
- }
- return allMountPoints;
- },
-
- /**
- * Settings for <code>single_mountpoint</code>-initializer
- * Used for configs with value as one of the possible mount points
- *
- * @see _initAsSingleMountPoint
- * @param {string|string[]} components
- * @param {string} winReplacer
- * @returns {{components: string[], winReplacer: string, type: string}}
- */
- getSingleMountPointConfig: function (components, winReplacer) {
- winReplacer = winReplacer || 'default';
- return {
- components: Em.makeArray(components),
- winReplacer: winReplacer,
- type: 'single_mountpoint'
- };
- },
-
- /**
- * Settings for <code>multiple_mountpoints</code>-initializer
- * Used for configs with value as all of the possible mount points
- *
- * @see _initAsMultipleMountPoints
- * @param {string|string[]} components
- * @param {string} winReplacer
- * @returns {{components: string[], winReplacer: string, type: string}}
- */
- getMultipleMountPointsConfig: function (components, winReplacer) {
- winReplacer = winReplacer || 'default';
- return {
- components: Em.makeArray(components),
- winReplacer: winReplacer,
- type: 'multiple_mountpoints'
- };
- }
-
-});
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab6d5523/ambari-web/test/utils/ajax/ajax_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/utils/ajax/ajax_test.js b/ambari-web/test/utils/ajax/ajax_test.js
index 747ccff..35de05f 100644
--- a/ambari-web/test/utils/ajax/ajax_test.js
+++ b/ambari-web/test/utils/ajax/ajax_test.js
@@ -23,11 +23,16 @@ describe('App.ajax', function() {
beforeEach(function() {
App.ajax.send.restore();
+ sinon.stub(App.logger, 'setTimer');
sinon.spy(App.ajax, 'send'); // no sense to test stubbed function, so going to spy on it
App.set('apiPrefix', '/api/v1');
App.set('clusterName', 'tdk');
});
+ afterEach(function() {
+ App.logger.setTimer.restore();
+ });
+
describe('#send', function() {
it('Without sender', function() {
expect(App.ajax.send({})).to.equal(null);
@@ -162,7 +167,7 @@ describe('App.ajax', function() {
});
});
});
-
+
describe('#abortRequests', function () {
var xhr = {
@@ -192,6 +197,6 @@ describe('App.ajax', function() {
it('should clear requests array', function () {
expect(requests).to.have.length(0);
});
-
+
});
});
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab6d5523/ambari-web/test/utils/configs/config_initializer_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/utils/configs/config_initializer_test.js b/ambari-web/test/utils/configs/config_initializer_test.js
index e951b34..d348055 100644
--- a/ambari-web/test/utils/configs/config_initializer_test.js
+++ b/ambari-web/test/utils/configs/config_initializer_test.js
@@ -905,312 +905,6 @@ describe('App.ConfigInitializer', function () {
});
- describe('config with mount points', function () {
-
- var localDB = {
- masterComponentHosts: [
- {
- component: 'NAMENODE',
- hostName: 'h0'
- },
- {
- component: 'SECONDARY_NAMENODE',
- hostName: 'h4'
- },
- {
- component: 'APP_TIMELINE_SERVER',
- hostName: 'h0'
- },
- {
- component: 'ZOOKEEPER_SERVER',
- hostName: 'h0'
- },
- {
- component: 'ZOOKEEPER_SERVER',
- hostName: 'h1'
- },
- {
- component: 'OOZIE_SERVER',
- hostName: 'h0'
- },
- {
- component: 'OOZIE_SERVER',
- hostName: 'h1'
- },
- {
- component: 'NIMBUS',
- hostName: 'h2'
- },
- {
- component: 'FALCON_SERVER',
- hostName: 'h3'
- },
- {
- component: 'KAFKA_BROKER',
- hostName: 'h0'
- },
- {
- component: 'KAFKA_BROKER',
- hostName: 'h1'
- }
- ],
- slaveComponentHosts: [
- {
- componentName: 'DATANODE',
- hosts: [
- {
- hostName: 'h0'
- },
- {
- hostName: 'h1'
- }
- ]
- },
- {
- componentName: 'TASKTRACKER',
- hosts: [
- {
- hostName: 'h0'
- },
- {
- hostName: 'h1'
- }
- ]
- },
- {
- componentName: 'NODEMANAGER',
- hosts: [
- {
- hostName: 'h0'
- },
- {
- hostName: 'h1'
- },
- {
- hostName: 'h4'
- }
- ]
- },
- {
- componentName: 'HBASE_REGIONSERVER',
- hosts: [
- {
- hostName: 'h0'
- },
- {
- hostName: 'h1'
- }
- ]
- },
- {
- componentName: 'SUPERVISOR',
- hosts: [
- {
- hostName: 'h0'
- },
- {
- hostName: 'h1'
- }
- ]
- }
- ],
- hosts: {
- h0: {
- disk_info: [
- {
- mountpoint: '/'
- },
- {
- mountpoint: '/home'
- },
- {
- mountpoint: '/boot'
- },
- {
- mountpoint: '/boot/efi'
- },
- {
- mountpoint: '/mnt'
- },
- {
- mountpoint: '/mnt/efi'
- },
- {
- mountpoint: '/media/disk0',
- available: '100000000'
- },
- {
- mountpoint: '/mount0',
- available: '100000000'
- }
- ]
- },
- h4: {
- disk_info: [
- {
- mountpoint: 'c:',
- available: '100000000'
- }
- ]
- }
- }
- },
- cases = [
- {
- name: 'dfs.namenode.name.dir',
- isOnlyFirstOneNeeded: false,
- value: '/media/disk0/default\n/mount0/default\n'
- },
- {
- name: 'dfs.name.dir',
- isOnlyFirstOneNeeded: false,
- value: '/media/disk0/default\n/mount0/default\n'
- },
- {
- name: 'fs.checkpoint.dir',
- isOnlyFirstOneNeeded: true,
- value: 'file:///c:/default\n'
- },
- {
- name: 'dfs.namenode.checkpoint.dir',
- isOnlyFirstOneNeeded: true,
- value: 'file:///c:/default\n'
- },
- {
- name: 'dfs.data.dir',
- isOnlyFirstOneNeeded: false,
- value: '/media/disk0/default\n/mount0/default\n/media/disk1/default\n/mount1/default\n'
- },
- {
- name: 'dfs.datanode.data.dir',
- isOnlyFirstOneNeeded: false,
- value: '/media/disk0/default\n/mount0/default\n/media/disk1/default\n/mount1/default\n'
- },
- {
- name: 'mapred.local.dir',
- isOnlyFirstOneNeeded: false,
- value: '/media/disk0/default\n/mount0/default\n/media/disk1/default\n/mount1/default\n'
- },
- {
- name: 'yarn.nodemanager.log-dirs',
- isOnlyFirstOneNeeded: false,
- value: '/media/disk0/default\n/mount0/default\n/media/disk1/default\n/mount1/default\nc:\\default\n'
- },
- {
- name: 'yarn.nodemanager.local-dirs',
- isOnlyFirstOneNeeded: false,
- value: '/media/disk0/default\n/mount0/default\n/media/disk1/default\n/mount1/default\nc:\\default\n'
- },
- {
- name: 'yarn.timeline-service.leveldb-timeline-store.path',
- isOnlyFirstOneNeeded: true,
- value: '/media/disk0/default'
- },
- {
- name: 'yarn.timeline-service.leveldb-state-store.path',
- isOnlyFirstOneNeeded: true,
- value: '/media/disk0/default'
- },
- {
- name: 'dataDir',
- isOnlyFirstOneNeeded: true,
- value: '/media/disk0/default'
- },
- {
- name: 'oozie_data_dir',
- isOnlyFirstOneNeeded: true,
- value: '/media/disk0/default'
- },
- {
- name: 'storm.local.dir',
- isOnlyFirstOneNeeded: true,
- value: '/media/disk0/default'
- },
- {
- name: '*.falcon.graph.storage.directory',
- isOnlyFirstOneNeeded: true,
- value: '/default'
- },
- {
- name: '*.falcon.graph.serialize.path',
- isOnlyFirstOneNeeded: true,
- value: '/default'
- },
- {
- name: 'log.dirs',
- isOnlyFirstOneNeeded: false,
- value: '/media/disk0/default\n/mount0/default\n/media/disk1/default\n/mount1/default\n'
- }
- ];
-
- beforeEach(function () {
- sinon.stub(App.Host, 'find').returns([
- Em.Object.create({
- id: 'h1',
- diskInfo: [
- {
- mountpoint: '/media/disk1',
- type: 'devtmpfs'
- },
- {
- mountpoint: '/media/disk1',
- type: 'tmpfs'
- },
- {
- mountpoint: '/media/disk1',
- type: 'vboxsf'
- },
- {
- mountpoint: '/media/disk1',
- type: 'CDFS'
- },
- {
- mountpoint: '/media/disk1',
- available: '0'
- },
- {
- mountpoint: '/media/disk1',
- available: '100000000'
- },
- {
- mountpoint: '/mount1',
- available: '100000000'
- }
- ]
- }),
- Em.Object.create({
- id: 'h2',
- diskInfo: [
- {
- mountpoint: '/'
- }
- ]
- }),
- Em.Object.create({
- id: 'h3',
- diskInfo: []
- })
- ]);
- });
-
- afterEach(function () {
- App.Host.find.restore();
- });
-
- cases.forEach(function (item) {
- it(item.name, function () {
- serviceConfigProperty.setProperties({
- name: item.name,
- recommendedValue: '/default'
- });
- App.ConfigInitializer.initialValue(serviceConfigProperty, localDB, {});
- expect(serviceConfigProperty.get('value')).to.equal(item.value);
- expect(serviceConfigProperty.get('recommendedValue')).to.equal(item.value);
- });
- });
-
- });
-
describe('initializerTypes', function () {
var types = App.ConfigInitializer.get('initializerTypes');
Em.keys(types).forEach(function(type) {
@@ -1275,155 +969,4 @@ describe('App.ConfigInitializer', function () {
});
});
- describe('#_filterMountPoint', function() {
- [
- {
- mPoint: {
- mountpoint: '/'
- },
- localDB: {},
- e: false
- },
- {
- mPoint: {
- mountpoint: '/home'
- },
- localDB: {},
- e: false
- },
- {
- mPoint: {
- mountpoint: '/etc/resolv.conf'
- },
- localDB: {},
- e: false
- },
- {
- mPoint: {
- mountpoint: '/etc/hostname'
- },
- localDB: {},
- e: false
- },
- {
- mPoint: {
- mountpoint: '/etc/hosts'
- },
- localDB: {},
- e: false
- },
- {
- mPoint: {
- mountpoint: '/boot'
- },
- localDB: {},
- e: false
- },
- {
- mPoint: {
- mountpoint: '/mnt'
- },
- localDB: {},
- e: false
- },
- {
- mPoint: {
- mountpoint: '/tmp'
- },
- localDB: {},
- e: false
- },
- {
- mPoint: {
- mountpoint: '/some-dir',
- type: 'devtmpfs'
- },
- localDB: {},
- e: false
- },
- {
- mPoint: {
- mountpoint: '/some-dir',
- type: 'tmpfs'
- },
- localDB: {},
- e: false
- },
- {
- mPoint: {
- mountpoint: '/some-dir',
- type: 'vboxsf'
- },
- localDB: {},
- e: false
- },
- {
- mPoint: {
- mountpoint: '/some-dir',
- type: 'CDFS'
- },
- localDB: {},
- e: false
- },
- {
- mPoint: {
- mountpoint: '/usr/hdp'
- },
- localDB: {},
- e: false
- },
- {
- mPoint: {
- mountpoint: '/usr/hdp/1'
- },
- localDB: {},
- e: false
- },
- {
- mPoint: {
- mountpoint: '/usr/hdp/current'
- },
- localDB: {},
- e: true
- },
- {
- mPoint: {
- mountpoint: '/usr/hdp/2.5'
- },
- localDB: {
- selectedStack: {
- repository_version: '2.5'
- }
- },
- e: true
- },
- {
- mPoint: {
- mountpoint: '/usr/hdp/2.5.0'
- },
- localDB: {
- selectedStack: {
- repository_version: '2.5'
- }
- },
- e: false
- },
- {
- mPoint: {
- mountpoint: '/normal/directory'
- },
- localDB: {
- selectedStack: {
- repository_version: '2.5'
- }
- },
- e: true
- }
- ].forEach(function(test) {
- it('mount point "{0}" should be {1}'.format(test.mPoint.mountpoint, test.e ? 'valid' : 'invalid'), function() {
- var fFn = App.ConfigInitializer._filterMountPoint(test.localDB);
- expect(fFn(test.mPoint)).to.be.equal(test.e);
- });
- });
- });
});