Posted to commits@ambari.apache.org by al...@apache.org on 2015/03/09 19:24:22 UTC

ambari git commit: AMBARI-9954. Spark-on-Tez apps fail; tez.tar.gz needs to be copied to HDFS (alejandro)

Repository: ambari
Updated Branches:
  refs/heads/trunk a0910f7f6 -> 3e1857071


AMBARI-9954. Spark-on-Tez apps fail; tez.tar.gz needs to be copied to HDFS (alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/3e185707
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/3e185707
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/3e185707

Branch: refs/heads/trunk
Commit: 3e1857071efdb50d6c289e8d232812b3002be4c2
Parents: a0910f7
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Thu Mar 5 16:09:34 2015 -0800
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Mon Mar 9 10:57:12 2015 -0700

----------------------------------------------------------------------
 .../SPARK/1.2.0.2.2/metainfo.xml                |   7 +
 .../package/scripts/job_history_server.py       |  10 +-
 .../stacks/2.2/SPARK/test_job_history_server.py | 106 ++++++++++++
 .../2.2/configs/spark-job-history-server.json   | 165 +++++++++++++++++++
 4 files changed, 286 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
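
Summary of the change: the Spark service definition gains a host-level dependency on TEZ/TEZ_CLIENT, and the Job History Server script copies the Tez tarball to HDFS both on service start and after hdp-select runs during a rolling upgrade, so Spark-on-Tez applications can localize tez.tar.gz. A minimal sketch of the upgrade-path wiring, assuming the Ambari agent's resource_management library is importable and a params module defines version, spark_user, hdfs_user and user_group (as in the real script in the diff below):

    # Sketch only: mirrors the call pattern added to job_history_server.py below.
    from resource_management.libraries.functions.dynamic_variable_interpretation import copy_tarballs_to_hdfs
    from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
    from resource_management.libraries.functions.format import format
    from resource_management.core.resources import Execute

    def pre_rolling_restart(self, env):
        import params
        env.set_params(params)
        # Only HDP 2.2.0.0 and later stacks are versioned via hdp-select.
        if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
            Execute(format("hdp-select set spark-historyserver {version}"))
            # Push tez.tar.gz (tez_tar_source from cluster-env) to the versioned
            # HDFS destination so Spark-on-Tez jobs can pick it up.
            copy_tarballs_to_hdfs('tez', 'spark-historyserver', params.spark_user,
                                  params.hdfs_user, params.user_group)

The same copy_tarballs_to_hdfs call is also made in start(), after the optional kinit and before the history server daemon is launched, as shown in the second hunk below.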


http://git-wip-us.apache.org/repos/asf/ambari/blob/3e185707/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/metainfo.xml b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/metainfo.xml
index ce8ad7a..5cd6038 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/metainfo.xml
@@ -33,6 +33,13 @@
           <versionAdvertised>true</versionAdvertised>
           <dependencies>
             <dependency>
+              <name>TEZ/TEZ_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
               <name>HDFS/HDFS_CLIENT</name>
               <scope>host</scope>
               <auto-deploy>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3e185707/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/job_history_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/job_history_server.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/job_history_server.py
index 518352f..ff04b58 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/job_history_server.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/job_history_server.py
@@ -20,8 +20,11 @@ limitations under the License.
 
 import sys
 import os
-from resource_management import *
 from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions.dynamic_variable_interpretation import copy_tarballs_to_hdfs
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.core.resources import Execute
 from resource_management.core.exceptions import ComponentIsNotRunning
 from resource_management.core.logger import Logger
 from resource_management.core import shell
@@ -39,6 +42,7 @@ class JobHistoryServer(Script):
     env.set_params(params)
     if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
       Execute(format("hdp-select set spark-historyserver {version}"))
+      copy_tarballs_to_hdfs('tez', 'spark-historyserver', params.spark_user, params.hdfs_user, params.user_group)
 
   def install(self, env):
     self.install_packages(env)
@@ -62,12 +66,14 @@ class JobHistoryServer(Script):
     import params
 
     env.set_params(params)
-    setup_spark(env, 'server', action = 'start')
+    setup_spark(env, 'server', action='start')
 
     if params.security_enabled:
       spark_kinit_cmd = format("{kinit_path_local} -kt {spark_kerberos_keytab} {spark_principal}; ")
       Execute(spark_kinit_cmd, user=params.spark_user)
 
+    copy_tarballs_to_hdfs('tez', 'spark-historyserver', params.spark_user, params.hdfs_user, params.user_group)
+
     daemon_cmd = format('{spark_history_server_start}')
     no_op_test = format(
       'ls {spark_history_server_pid_file} >/dev/null 2>&1 && ps -p `cat {spark_history_server_pid_file}` >/dev/null 2>&1')

http://git-wip-us.apache.org/repos/asf/ambari/blob/3e185707/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py b/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
new file mode 100644
index 0000000..469f68e
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+import sys
+import os
+from mock.mock import MagicMock, patch
+
+from stacks.utils.RMFTestCase import *
+from resource_management.core import shell
+from resource_management.libraries.functions import dynamic_variable_interpretation
+
+class TestJobHistoryServer(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "SPARK/1.2.0.2.2/package"
+  STACK_VERSION = "2.2"
+
+  def setUp(self):
+    sys.path.insert(0, os.path.join(os.getcwd(),
+      "../../main/resources/common-services", self.COMMON_SERVICES_PACKAGE_DIR,
+      "scripts"))
+
+  @patch.object(shell, "call")
+  @patch("setup_spark.create_file")
+  @patch("setup_spark.write_properties_to_file")
+  @patch.object(dynamic_variable_interpretation, "copy_tarballs_to_hdfs")
+  def test_start(self, copy_tarball_mock, write_properties_to_file_mock, create_file_mock, call_mock):
+    hdp_version = "2.2.2.0-2538"
+    call_mock.return_value = (0, hdp_version)
+    copy_tarball_mock.return_value = 0
+
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/job_history_server.py",
+                         classname="JobHistoryServer",
+                         command="start",
+                         config_file="spark-job-history-server.json",
+                         hdp_stack_version=self.STACK_VERSION,
+                         target=RMFTestCase.TARGET_COMMON_SERVICES
+    )
+
+    self.assertTrue(create_file_mock.called)
+    self.assertTrue(write_properties_to_file_mock.called)
+
+
+    self.assertResourceCalled("Directory", "/var/run/spark",
+                              owner="spark",
+                              group="hadoop",
+                              recursive=True
+    )
+    self.assertResourceCalled("Directory", "/var/log/spark",
+                              owner="spark",
+                              group="hadoop",
+                              recursive=True
+    )
+    self.assertResourceCalled("HdfsDirectory", "/user/spark",
+                              security_enabled=False,
+                              keytab=UnknownConfigurationMock(),
+                              conf_dir="/etc/hadoop/conf",
+                              hdfs_user="hdfs",
+                              kinit_path_local="/usr/bin/kinit",
+                              mode=509,
+                              owner="spark",
+                              bin_dir="/usr/hdp/current/hadoop-client/bin",
+                              action=["create"]
+    )
+    self.assertResourceCalled("File", "/etc/spark/conf/spark-env.sh",
+                              owner="spark",
+                              group="spark",
+                              content=InlineTemplate(self.getConfig()['configurations']['spark-env']['content'])
+    )
+    self.assertResourceCalled("File", "/etc/spark/conf/log4j.properties",
+                              owner="spark",
+                              group="spark",
+                              content=self.getConfig()['configurations']['spark-log4j-properties']['content']
+    )
+    self.assertResourceCalled("File", "/etc/spark/conf/metrics.properties",
+                              owner="spark",
+                              group="spark",
+                              content=InlineTemplate(self.getConfig()['configurations']['spark-metrics-properties']['content'])
+    )
+    self.assertResourceCalled("File", "/etc/spark/conf/java-opts",
+                              owner="spark",
+                              group="spark",
+                              content="  -Dhdp.version=" + hdp_version
+    )
+
+    copy_tarball_mock.assert_called_with("tez", "spark-historyserver", "spark", "hdfs", "hadoop")
+    
+    self.assertResourceCalled("Execute", "/usr/hdp/current/spark-historyserver/sbin/start-history-server.sh",
+                              not_if="ls /var/run/spark/spark-spark-org.apache.spark.deploy.history.HistoryServer-1.pid >/dev/null 2>&1 && ps -p `cat /var/run/spark/spark-spark-org.apache.spark.deploy.history.HistoryServer-1.pid` >/dev/null 2>&1",
+                              environment={'JAVA_HOME': '/usr/jdk64/jdk1.7.0_67'},
+                              user="spark"
+    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/3e185707/ambari-server/src/test/python/stacks/2.2/configs/spark-job-history-server.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/spark-job-history-server.json b/ambari-server/src/test/python/stacks/2.2/configs/spark-job-history-server.json
new file mode 100644
index 0000000..e9f6e9a
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.2/configs/spark-job-history-server.json
@@ -0,0 +1,165 @@
+{
+    "configuration_attributes": {
+        "spark-defaults": {},
+        "spark-javaopts-properties": {}, 
+        "spark-log4j-properties": {},
+        "spark-env": {},
+        "core-site": {
+            "final": {
+                "fs.defaultFS": "true"
+            }
+        },
+        "hadoop-env": {},
+        "spark-metrics-properties": {},
+        "cluster-env": {}
+    }, 
+    "commandParams": {
+        "service_package_folder": "common-services/SPARK/1.2.0.2.2/package", 
+        "script": "scripts/job_history_server.py", 
+        "hooks_folder": "HDP/2.0.6/hooks", 
+        "version": "2.2.2.0-2538", 
+        "command_timeout": "600", 
+        "script_type": "PYTHON"
+    }, 
+    "roleCommand": "START", 
+    "kerberosCommandParams": [], 
+    "clusterName": "c1", 
+    "hostname": "c6408.ambari.apache.org", 
+    "hostLevelParams": {
+        "jdk_location": "http://c6408.ambari.apache.org:8080/resources/", 
+        "ambari_db_rca_password": "mapred", 
+        "java_home": "/usr/jdk64/jdk1.7.0_67", 
+        "ambari_db_rca_url": "jdbc:postgresql://c6408.ambari.apache.org/ambarirca",
+        "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.2.2.0-2538\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.2\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/GA/2.2.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.2.2.0-2538\",\"baseSaved\":true},{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.20\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"baseSaved\":true}]", 
+        "group_list": "[\"hadoop\",\"users\",\"spark\"]", 
+        "package_list": "[{\"name\":\"spark_2_2_*\"},{\"name\":\"spark_2_2_*-python\"}]", 
+        "stack_version": "2.2", 
+        "stack_name": "HDP", 
+        "db_name": "ambari",
+        "user_list": "[\"mapred\",\"ambari-qa\",\"zookeeper\",\"tez\",\"false\",\"hdfs\",\"yarn\",\"spark\"]",
+        "clientsToUpdateConfigs": "[\"*\"]"
+    }, 
+    "commandType": "EXECUTION_COMMAND", 
+    "roleParams": {}, 
+    "serviceName": "SPARK", 
+    "role": "SPARK_JOBHISTORYSERVER", 
+    "forceRefreshConfigTags": [], 
+    "taskId": 70, 
+    "public_hostname": "c6408.ambari.apache.org", 
+    "configurations": {
+        "spark-defaults": {
+            "spark.yarn.applicationMaster.waitTries": "10", 
+            "spark.history.kerberos.keytab": "none", 
+            "spark.yarn.preserve.staging.files": "false", 
+            "spark.yarn.submit.file.replication": "3", 
+            "spark.history.kerberos.principal": "none", 
+            "spark.yarn.driver.memoryOverhead": "384", 
+            "spark.yarn.queue": "default", 
+            "spark.yarn.containerLauncherMaxThreads": "25", 
+            "spark.yarn.scheduler.heartbeat.interval-ms": "5000", 
+            "spark.history.ui.port": "18080", 
+            "spark.yarn.max.executor.failures": "3", 
+            "spark.driver.extraJavaOptions": "", 
+            "spark.history.provider": "org.apache.spark.deploy.yarn.history.YarnHistoryProvider", 
+            "spark.yarn.am.extraJavaOptions": "", 
+            "spark.yarn.executor.memoryOverhead": "384"
+        },
+        "spark-javaopts-properties": {
+            "content": " "
+        }, 
+        "spark-log4j-properties": {
+            "content": "\n# Set everything to be logged to the console\nlog4j.rootCategory=INFO, console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n\n\n# Settings to quiet third party logs that are too verbose\nlog4j.logger.org.eclipse.jetty=WARN\nlog4j.logger.org.eclipse.jetty.util.component.AbstractLifeCycle=ERROR\nlog4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO\nlog4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO"
+        },
+        "spark-env": {
+            "content": "\n#!/usr/bin/env bash\n\n# This file is sourced when running various Spark programs.\n# Copy it as spark-env.sh and edit that to configure Spark for your site.\n\n# Options read in YARN client mode\n#SPARK_EXECUTOR_INSTANCES=\"2\" #Number of workers to start (Default: 2)\n#SPARK_EXECUTOR_CORES=\"1\" #Number of cores for the workers (Default: 1).\n#SPARK_EXECUTOR_MEMORY=\"1G\" #Memory per Worker (e.g. 1000M, 2G) (Default: 1G)\n#SPARK_DRIVER_MEMORY=\"512 Mb\" #Memory for Master (e.g. 1000M, 2G) (Default: 512 Mb)\n#SPARK_YARN_APP_NAME=\"spark\" #The name of your application (Default: Spark)\n#SPARK_YARN_QUEUE=\"~@~Xdefault~@~Y\" #The hadoop queue to use for allocation requests (Default: @~Xdefault~@~Y)\n#SPARK_YARN_DIST_FILES=\"\" #Comma separated list of files to be distributed with the job.\n#SPARK_YARN_DIST_ARCHIVES=\"\" #Comma separated list of archives to be distributed with the job.\n\n# Generic options for the daemons used in the standalone deploy mode\n\
 n# Alternate conf dir. (Default: ${SPARK_HOME}/conf)\nexport SPARK_CONF_DIR=${SPARK_HOME:-{{spark_home}}}/conf\n\n# Where log files are stored.(Default:${SPARK_HOME}/logs)\n#export SPARK_LOG_DIR=${SPARK_HOME:-{{spark_home}}}/logs\nexport SPARK_LOG_DIR={{spark_log_dir}}\n\n# Where the pid file is stored. (Default: /tmp)\nexport SPARK_PID_DIR={{spark_pid_dir}}\n\n# A string representing this instance of spark.(Default: $USER)\nSPARK_IDENT_STRING=$USER\n\n# The scheduling priority for daemons. (Default: 0)\nSPARK_NICENESS=0\n\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n# The java implementation to use.\nexport JAVA_HOME={{java_home}}\n\nif [ -d \"/etc/tez/conf/\" ]; then\n  export TEZ_CONF_DIR=/etc/tez/conf\nelse\n  export TEZ_CONF_DIR=\nfi", 
+            "spark_pid_dir": "/var/run/spark", 
+            "spark_log_dir": "/var/log/spark", 
+            "spark_group": "spark", 
+            "spark_user": "spark"
+        },
+        "hadoop-env": {
+            "dtnode_heapsize": "1024m", 
+            "namenode_opt_maxnewsize": "256m", 
+            "hdfs_log_dir_prefix": "/var/log/hadoop", 
+            "namenode_heapsize": "1024m", 
+            "proxyuser_group": "users", 
+            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStam
 ps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following
  applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HAD
 OOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/
 java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_
 OPTS\"", 
+            "hdfs_user": "hdfs", 
+            "namenode_opt_newsize": "256m", 
+            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist", 
+            "hadoop_root_logger": "INFO,RFA", 
+            "hadoop_heapsize": "1024", 
+            "namenode_opt_maxpermsize": "256m", 
+            "namenode_opt_permsize": "128m"
+        },
+        "spark-metrics-properties": {
+            "content": "\n# syntax: [instance].sink|source.[name].[options]=[value]\n\n# This file configures Spark's internal metrics system. The metrics system is\n# divided into instances which correspond to internal components.\n# Each instance can be configured to report its metrics to one or more sinks.\n# Accepted values for [instance] are \"master\", \"worker\", \"executor\", \"driver\",\n# and \"applications\". A wild card \"*\" can be used as an instance name, in\n# which case all instances will inherit the supplied property.\n#\n# Within an instance, a \"source\" specifies a particular set of grouped metrics.\n# there are two kinds of sources:\n# 1. Spark internal sources, like MasterSource, WorkerSource, etc, which will\n# collect a Spark component's internal state. Each instance is paired with a\n# Spark source that is added automatically.\n# 2. Common sources, like JvmSource, which will collect low level state.\n# These can be added through configuration options and ar
 e then loaded\n# using reflection.\n#\n# A \"sink\" specifies where metrics are delivered to. Each instance can be\n# assigned one or more sinks.\n#\n# The sink|source field specifies whether the property relates to a sink or\n# source.\n#\n# The [name] field specifies the name of source or sink.\n#\n# The [options] field is the specific property of this source or sink. The\n# source or sink is responsible for parsing this property.\n#\n# Notes:\n# 1. To add a new sink, set the \"class\" option to a fully qualified class\n# name (see examples below).\n# 2. Some sinks involve a polling period. The minimum allowed polling period\n# is 1 second.\n# 3. Wild card properties can be overridden by more specific properties.\n# For example, master.sink.console.period takes precedence over\n# *.sink.console.period.\n# 4. A metrics specific configuration\n# \"spark.metrics.conf=${SPARK_HOME}/conf/metrics.properties\" should be\n# added to Java properties using -Dspark.metrics.conf=xxx if you wa
 nt to\n# customize metrics system. You can also put the file in ${SPARK_HOME}/conf\n# and it will be loaded automatically.\n# 5. MetricsServlet is added by default as a sink in master, worker and client\n# driver, you can send http request \"/metrics/json\" to get a snapshot of all the\n# registered metrics in json format. For master, requests \"/metrics/master/json\" and\n# \"/metrics/applications/json\" can be sent seperately to get metrics snapshot of\n# instance master and applications. MetricsServlet may not be configured by self.\n#\n\n## List of available sinks and their properties.\n\n# org.apache.spark.metrics.sink.ConsoleSink\n# Name: Default: Description:\n# period 10 Poll period\n# unit seconds Units of poll period\n\n# org.apache.spark.metrics.sink.CSVSink\n# Name: Default: Description:\n# period 10 Poll period\n# unit seconds Units of poll period\n# directory /tmp Where to store CSV files\n\n# org.apache.spark.metrics.sink.GangliaSink\n# Name: Default: Description:\n# 
 host NONE Hostname or multicast group of Ganglia server\n# port NONE Port of Ganglia server(s)\n# period 10 Poll period\n# unit seconds Units of poll period\n# ttl 1 TTL of messages sent by Ganglia\n# mode multicast Ganglia network mode ('unicast' or 'multicast')\n\n# org.apache.spark.metrics.sink.JmxSink\n\n# org.apache.spark.metrics.sink.MetricsServlet\n# Name: Default: Description:\n# path VARIES* Path prefix from the web server root\n# sample false Whether to show entire set of samples for histograms ('false' or 'true')\n#\n# * Default path is /metrics/json for all instances except the master. The master has two paths:\n# /metrics/aplications/json # App information\n# /metrics/master/json # Master information\n\n# org.apache.spark.metrics.sink.GraphiteSink\n# Name: Default: Description:\n# host NONE Hostname of Graphite server\n# port NONE Port of Graphite server\n# period 10 Poll period\n# unit seconds Units of poll period\n# prefix EMPTY STRING Prefix to prepend to metric name
 \n\n## Examples\n# Enable JmxSink for all instances by class name\n#*.sink.jmx.class=org.apache.spark.metrics.sink.JmxSink\n\n# Enable ConsoleSink for all instances by class name\n#*.sink.console.class=org.apache.spark.metrics.sink.ConsoleSink\n\n# Polling period for ConsoleSink\n#*.sink.console.period=10\n\n#*.sink.console.unit=seconds\n\n# Master instance overlap polling period\n#master.sink.console.period=15\n\n#master.sink.console.unit=seconds\n\n# Enable CsvSink for all instances\n#*.sink.csv.class=org.apache.spark.metrics.sink.CsvSink\n\n# Polling period for CsvSink\n#*.sink.csv.period=1\n\n#*.sink.csv.unit=minutes\n\n# Polling directory for CsvSink\n#*.sink.csv.directory=/tmp/\n\n# Worker instance overlap polling period\n#worker.sink.csv.period=10\n\n#worker.sink.csv.unit=minutes\n\n# Enable jvm source for instance master, worker, driver and executor\n#master.source.jvm.class=org.apache.spark.metrics.source.JvmSource\n\n#worker.source.jvm.class=org.apache.spark.metrics.source
 .JvmSource\n\n#driver.source.jvm.class=org.apache.spark.metrics.source.JvmSource\n\n#executor.source.jvm.class=org.apache.spark.metrics.source.JvmSource"
+        },
+        "cluster-env": {
+            "security_enabled": "false", 
+            "hive_tar_source": "/usr/hdp/current/hive-client/hive.tar.gz", 
+            "hadoop-streaming_tar_destination_folder": "hdfs:///hdp/apps/{{ hdp_stack_version }}/mapreduce/", 
+            "pig_tar_source": "/usr/hdp/current/pig-client/pig.tar.gz", 
+            "tez_tar_destination_folder": "hdfs:///hdp/apps/{{ hdp_stack_version }}/tez/", 
+            "ignore_groupsusers_create": "false", 
+            "hadoop-streaming_tar_source": "/usr/hdp/current/hadoop-mapreduce-client/hadoop-streaming.jar", 
+            "kerberos_domain": "EXAMPLE.COM", 
+            "hive_tar_destination_folder": "hdfs:///hdp/apps/{{ hdp_stack_version }}/hive/", 
+            "mapreduce_tar_destination_folder": "hdfs:///hdp/apps/{{ hdp_stack_version }}/mapreduce/", 
+            "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab", 
+            "pig_tar_destination_folder": "hdfs:///hdp/apps/{{ hdp_stack_version }}/pig/", 
+            "user_group": "hadoop", 
+            "mapreduce_tar_source": "/usr/hdp/current/hadoop-client/mapreduce.tar.gz", 
+            "sqoop_tar_source": "/usr/hdp/current/sqoop-client/sqoop.tar.gz", 
+            "smokeuser": "ambari-qa", 
+            "sqoop_tar_destination_folder": "hdfs:///hdp/apps/{{ hdp_stack_version }}/sqoop/", 
+            "tez_tar_source": "/usr/hdp/current/tez-client/lib/tez.tar.gz"
+        }
+    }, 
+    "configurationTags": {
+        "spark-defaults": {
+            "tag": "version1425610281212"
+        },
+        "spark-javaopts-properties": {
+            "tag": "version1425610281212"
+        }, 
+        "spark-log4j-properties": {
+            "tag": "version1425610281212"
+        },
+        "spark-env": {
+            "tag": "version1425610281212"
+        },
+        "core-site": {
+            "tag": "version1"
+        },
+        "hadoop-env": {
+            "tag": "version1"
+        },
+        "spark-metrics-properties": {
+            "tag": "version1425610281212"
+        },
+        "cluster-env": {
+            "tag": "version1"
+        }
+    }, 
+    "commandId": "12-0", 
+    "clusterHostInfo": {
+        "all_hosts": [
+            "c6408.ambari.apache.org", 
+            "c6410.ambari.apache.org", 
+            "c6409.ambari.apache.org"
+        ], 
+        "slave_hosts": [
+            "c6408.ambari.apache.org", 
+            "c6410.ambari.apache.org", 
+            "c6409.ambari.apache.org"
+        ],
+        "spark_jobhistoryserver_hosts": [
+            "c6408.ambari.apache.org"
+        ], 
+        "ambari_server_host": [
+            "c6408.ambari.apache.org"
+        ]
+    }
+}
\ No newline at end of file