Posted to commits@ambari.apache.org by rn...@apache.org on 2014/12/22 18:32:29 UTC

[1/3] ambari git commit: AMBARI-8771. Add support for deploying HDFS NameNode HA Clusters with Blueprints. (rnettleton)

Repository: ambari
Updated Branches:
  refs/heads/trunk 4f919d2c1 -> 4320de6ee


http://git-wip-us.apache.org/repos/asf/ambari/blob/4320de6e/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json
new file mode 100644
index 0000000..4bbb0f5
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json
@@ -0,0 +1,618 @@
+{
+    "roleCommand": "START", 
+    "clusterName": "cl1", 
+    "hostname": "c6402.ambari.apache.org", 
+    "passiveInfo": [], 
+    "hostLevelParams": {
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
+        "ambari_db_rca_password": "mapred", 
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
+        "jce_name": "UnlimitedJCEPolicyJDK7.zip", 
+        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
+        "repo_info": "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0.6\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]", 
+        "package_list": "[{\"type\":\"rpm\",\"name\":\"hadoop-yarn\"},{\"type\":\"rpm\",\"name\":\"hadoop-yarn-nodemanager\"},{\"type\":\"rpm\",\"name\":\"hadoop-mapreduce\"},{\"type\":\"rpm\",\"name\":\"hadoop-yarn-proxyserver\"},{\"type\":\"rpm\",\"name\":\"hadoop-yarn-resourcemanager\"}]", 
+        "stack_version": "2.0",
+        "stack_name": "HDP", 
+        "db_name": "ambari", 
+        "ambari_db_rca_driver": "org.postgresql.Driver", 
+        "jdk_name": "jdk-7u67-linux-x64.tar.gz",
+        "ambari_db_rca_username": "mapred", 
+        "java_home": "/usr/jdk64/jdk1.7.0_45", 
+        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar"
+    }, 
+    "commandType": "EXECUTION_COMMAND", 
+    "roleParams": {}, 
+    "serviceName": "YARN", 
+    "role": "NODEMANAGER", 
+    "commandParams": {
+        "service_package_folder": "HDP/2.0.6/services/YARN/package", 
+        "script": "scripts/nodemanager.py", 
+        "hooks_folder": "HDP/2.0.6/hooks",
+        "command_timeout": "600", 
+        "script_type": "PYTHON"
+    }, 
+    "taskId": 93, 
+    "public_hostname": "c6401.ambari.apache.org", 
+    "configurations": {
+        "mapred-site": {
+            "mapreduce.jobhistory.address": "c6402.ambari.apache.org:10020", 
+            "mapreduce.cluster.administrators": " hadoop", 
+            "mapreduce.reduce.input.buffer.percent": "0.0", 
+            "mapreduce.output.fileoutputformat.compress": "false", 
+            "mapreduce.framework.name": "yarn", 
+            "mapreduce.map.speculative": "false", 
+            "mapreduce.reduce.shuffle.merge.percent": "0.66", 
+            "yarn.app.mapreduce.am.resource.mb": "683", 
+            "mapreduce.map.java.opts": "-Xmx273m", 
+            "mapreduce.application.classpath": "$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*", 
+            "mapreduce.job.reduce.slowstart.completedmaps": "0.05", 
+            "mapreduce.output.fileoutputformat.compress.type": "BLOCK", 
+            "mapreduce.reduce.speculative": "false", 
+            "mapreduce.reduce.java.opts": "-Xmx546m", 
+            "mapreduce.am.max-attempts": "2", 
+            "yarn.app.mapreduce.am.admin-command-opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN", 
+            "mapreduce.reduce.log.level": "INFO", 
+            "mapreduce.map.sort.spill.percent": "0.7", 
+            "mapreduce.task.timeout": "300000", 
+            "mapreduce.map.memory.mb": "341", 
+            "mapreduce.task.io.sort.factor": "100", 
+            "mapreduce.jobhistory.intermediate-done-dir": "/mr-history/tmp", 
+            "mapreduce.reduce.memory.mb": "683", 
+            "yarn.app.mapreduce.am.log.level": "INFO", 
+            "mapreduce.map.log.level": "INFO", 
+            "mapreduce.shuffle.port": "13562", 
+            "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &> /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`", 
+            "mapreduce.map.output.compress": "false", 
+            "yarn.app.mapreduce.am.staging-dir": "/user", 
+            "mapreduce.reduce.shuffle.parallelcopies": "30", 
+            "mapreduce.reduce.shuffle.input.buffer.percent": "0.7", 
+            "mapreduce.jobhistory.webapp.address": "c6402.ambari.apache.org:19888", 
+            "mapreduce.jobhistory.done-dir": "/mr-history/done", 
+            "mapreduce.admin.reduce.child.java.opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN", 
+            "mapreduce.task.io.sort.mb": "136", 
+            "yarn.app.mapreduce.am.command-opts": "-Xmx546m", 
+            "mapreduce.admin.map.child.java.opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN"
+        }, 
+        "global": {
+            "security_enabled": "false", 
+            "proxyuser_group": "users", 
+            "zk_user": "zookeeper", 
+            "falcon_user": "falcon", 
+            "syncLimit": "5", 
+            "yarn_user": "yarn", 
+            "zk_log_dir": "/var/log/zookeeper", 
+            "yarn_heapsize": "1024", 
+            "rca_enabled": "false", 
+            "namenode_heapsize": "1024m", 
+            "oozie_user": "oozie", 
+            "hcat_conf_dir": "", 
+            "ganglia_runtime_dir": "/var/run/ganglia/hdp", 
+            "lzo_enabled": "true", 
+            "namenode_opt_maxnewsize": "200m", 
+            "smokeuser": "ambari-qa", 
+            "hive_user": "hive", 
+            "hdfs_log_dir_prefix": "/var/log/hadoop", 
+            "mapred_pid_dir_prefix": "/var/run/hadoop-mapreduce", 
+            "zk_data_dir": "/hadoop/zookeeper", 
+            "yarn_pid_dir_prefix": "/var/run/hadoop-yarn", 
+            "initLimit": "10", 
+            "zk_pid_dir": "/var/run/zookeeper", 
+            "user_group": "hadoop", 
+            "dtnode_heapsize": "1024m", 
+            "gmond_user": "nobody", 
+            "tickTime": "2000", 
+            "storm_user": "storm", 
+            "clientPort": "2181", 
+            "yarn_log_dir_prefix": "/var/log/hadoop-yarn", 
+            "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce",  
+            "hdfs_user": "hdfs", 
+            "hbase_user": "hbase", 
+            "webhcat_user": "hcat", 
+            "nodemanager_heapsize": "1024", 
+            "gmetad_user": "nobody", 
+            "namenode_opt_newsize": "200m",
+            "namenode_opt_permsize" : "128m",
+            "namenode_opt_maxpermsize" : "256m", 
+            "mapred_user": "mapred", 
+            "resourcemanager_heapsize": "1024", 
+            "hcat_user": "hcat", 
+            "hadoop_heapsize": "1024", 
+            "hadoop_pid_dir_prefix": "/var/run/hadoop"
+        }, 
+        "capacity-scheduler": {
+            "yarn.scheduler.capacity.node-locality-delay": "40", 
+            "yarn.scheduler.capacity.root.capacity": "100", 
+            "yarn.scheduler.capacity.root.acl_administer_queues": "*", 
+            "yarn.scheduler.capacity.root.queues": "default", 
+            "yarn.scheduler.capacity.maximum-applications": "10000", 
+            "yarn.scheduler.capacity.root.default.user-limit-factor": "1",
+            "yarn.scheduler.capacity.root.default.maximum-capacity": "100", 
+            "yarn.scheduler.capacity.root.default.state": "RUNNING", 
+            "yarn.scheduler.capacity.maximum-am-resource-percent": "0.2", 
+            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "*", 
+            "yarn.scheduler.capacity.root.default.capacity": "100", 
+            "yarn.scheduler.capacity.root.default.acl_submit_jobs": "*"
+        }, 
+        "hdfs-site": {
+            "dfs.namenode.avoid.write.stale.datanode": "true", 
+            "dfs.namenode.shared.edits.dir": "qjournal://c6401.ambari.apache.org:8485;c6402.ambari.apache.org:8485;c6403.ambari.apache.org:8485/ns1", 
+            "dfs.namenode.rpc-address.ns1.nn1": "c6401.ambari.apache.org:8020", 
+            "dfs.namenode.http-address.ns1.nn2": "c6402.ambari.apache.org:50070", 
+            "dfs.namenode.http-address.ns1.nn1": "c6401.ambari.apache.org:50070", 
+            "dfs.namenode.checkpoint.txns": "1000000", 
+            "dfs.namenode.rpc-address.ns1.nn2": "c6402.ambari.apache.org:8020", 
+            "dfs.block.access.token.enable": "true", 
+            "dfs.support.append": "true", 
+            "dfs.datanode.address": "0.0.0.0:${ambari.dfs.datanode.port}", 
+            "dfs.cluster.administrators": " hdfs", 
+            "ambari.dfs.datanode.http.port": "50075", 
+            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
+            "dfs.namenode.safemode.threshold-pct": "1.0f", 
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
+            "dfs.permissions.enabled": "true", 
+            "dfs.client.read.shortcircuit": "true", 
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
+            "dfs.ha.automatic-failover.enabled": "true", 
+            "dfs.blockreport.initialDelay": "120", 
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journal", 
+            "dfs.blocksize": "134217728", 
+            "dfs.datanode.max.transfer.threads": "1024", 
+            "dfs.heartbeat.interval": "3", 
+            "dfs.replication": "3", 
+            "dfs.namenode.handler.count": "100", 
+            "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary", 
+            "fs.permissions.umask-mode": "022", 
+            "dfs.namenode.stale.datanode.interval": "30000", 
+            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
+            "dfs.namenode.name.dir": "/hadoop/hdfs/namenode", 
+            "dfs.nameservices": "ns1", 
+            "dfs.datanode.data.dir": "/hadoop/hdfs/data", 
+            "dfs.namenode.https-address.ns1.nn2": "c6402.ambari.apache.org:50470", 
+            "dfs.webhdfs.enabled": "true", 
+            "dfs.namenode.https-address.ns1.nn1": "c6401.ambari.apache.org:50470", 
+            "dfs.client.failover.proxy.provider.ns1": "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider", 
+            "dfs.datanode.failed.volumes.tolerated": "0", 
+            "dfs.namenode.accesstime.precision": "0", 
+            "ambari.dfs.datanode.port": "50010", 
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
+            "dfs.namenode.secondary.http-address": "c6402.ambari.apache.org:50090", 
+            "dfs.ha.fencing.methods": "shell(/bin/true)", 
+            "dfs.datanode.http.address": "0.0.0.0:50075",
+            "dfs.datanode.du.reserved": "1073741824", 
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
+            "dfs.ha.namenodes.ns1": "nn1,nn2", 
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
+            "dfs.permissions.superusergroup": "hdfs", 
+            "dfs.https.port": "50470", 
+            "dfs.journalnode.http-address": "0.0.0.0:8480", 
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
+            "dfs.namenode.avoid.read.stale.datanode": "true", 
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
+            "dfs.datanode.data.dir.perm": "750", 
+            "dfs.namenode.name.dir.restore": "true", 
+            "dfs.replication.max": "50", 
+            "dfs.namenode.checkpoint.period": "21600"
+        }, 
+        "yarn-log4j": {
+            "log4j.appender.JSA.layout.ConversionPattern": "%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n", 
+            "log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary": "${yarn.server.resourcemanager.appsummary.logger}", 
+            "log4j.appender.RMSUMMARY.File": "/var/log/hadoop-yarn/yarn/${yarn.server.resourcemanager.appsummary.log.file}", 
+            "log4j.appender.RMSUMMARY.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.appender.RMSUMMARY.layout.ConversionPattern": "%d{ISO8601} %p %c{2}: %m%n", 
+            "hadoop.mapreduce.jobsummary.log.file": "hadoop-mapreduce.jobsummary.log", 
+            "log4j.appender.RMSUMMARY.MaxBackupIndex": "20", 
+            "log4j.appender.RMSUMMARY": "org.apache.log4j.RollingFileAppender", 
+            "log4j.appender.JSA": "org.apache.log4j.DailyRollingFileAppender", 
+            "hadoop.mapreduce.jobsummary.logger": "${hadoop.root.logger}", 
+            "yarn.server.resourcemanager.appsummary.log.file": "hadoop-mapreduce.jobsummary.log", 
+            "log4j.appender.JSA.DatePattern": ".yyyy-MM-dd", 
+            "yarn.server.resourcemanager.appsummary.logger": "${hadoop.root.logger}", 
+            "log4j.appender.JSA.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.appender.RMSUMMARY.MaxFileSize": "256MB", 
+            "log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary": "false"
+        }, 
+        "core-site": {
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
+            "gluster.daemon.user": "null", 
+            "fs.trash.interval": "360", 
+            "hadoop.security.authentication": "simple", 
+            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec", 
+            "mapreduce.jobtracker.webinterface.trusted": "false", 
+            "fs.AbstractFileSystem.glusterfs.impl": "null", 
+            "fs.defaultFS": "hdfs://ns1", 
+            "ipc.client.connect.max.retries": "50", 
+            "ipc.client.idlethreshold": "8000", 
+            "io.file.buffer.size": "131072", 
+            "hadoop.security.authorization": "false", 
+            "ha.zookeeper.quorum": "c6401.ambari.apache.org:2181,c6402.ambari.apache.org:2181,c6403.ambari.apache.org:2181", 
+            "ipc.client.connection.maxidletime": "30000", 
+            "hadoop.security.auth_to_local": "\n        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n        RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n        RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n        RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n        DEFAULT"
+        }, 
+        "hdfs-log4j": {
+            "log4j.appender.DRFA.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.appender.DRFA.layout.ConversionPattern": "%d{ISO8601} %p %c: %m%n", 
+            "log4j.appender.DRFAAUDIT.DatePattern": ".yyyy-MM-dd", 
+            "log4j.appender.EventCounter": "org.apache.hadoop.log.metrics.EventCounter", 
+            "log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit": "false", 
+            "log4j.appender.DRFAS.File": "${hadoop.log.dir}/${hadoop.security.log.file}", 
+            "log4j.appender.NullAppender": "org.apache.log4j.varia.NullAppender", 
+            "log4j.appender.MRAUDIT.layout.ConversionPattern": "%d{ISO8601} %p %c{2}: %m%n", 
+            "log4j.additivity.org.apache.hadoop.mapred.AuditLogger": "false", 
+            "log4j.appender.DRFAS": "org.apache.log4j.DailyRollingFileAppender", 
+            "hadoop.tasklog.noKeepSplits": "4", 
+            "log4j.appender.DRFAAUDIT": "org.apache.log4j.DailyRollingFileAppender", 
+            "log4j.appender.DRFAAUDIT.File": "${hadoop.log.dir}/hdfs-audit.log", 
+            "log4j.appender.DRFAS.DatePattern": ".yyyy-MM-dd", 
+            "log4j.appender.MRAUDIT": "org.apache.log4j.DailyRollingFileAppender", 
+            "hadoop.security.log.maxbackupindex": "20", 
+            "log4j.appender.DRFA.DatePattern": ".yyyy-MM-dd", 
+            "log4j.appender.console.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service": "ERROR", 
+            "log4j.appender.RFA.layout": "org.apache.log4j.PatternLayout", 
+            "hadoop.tasklog.taskid": "null", 
+            "log4j.appender.RFAS.layout": "org.apache.log4j.PatternLayout", 
+            "hadoop.root.logger": "INFO,console", 
+            "hadoop.security.logger": "INFO,console", 
+            "log4j.appender.DRFAAUDIT.layout.ConversionPattern": "%d{ISO8601} %p %c{2}: %m%n", 
+            "log4j.appender.RFAS.MaxFileSize": "${hadoop.security.log.maxfilesize}", 
+            "log4j.appender.MRAUDIT.DatePattern": ".yyyy-MM-dd", 
+            "log4j.appender.RFA.File": "${hadoop.log.dir}/${hadoop.log.file}", 
+            "log4j.appender.RFAS.layout.ConversionPattern": "%d{ISO8601} %p %c: %m%n", 
+            "log4j.appender.TLA": "org.apache.hadoop.mapred.TaskLogAppender", 
+            "log4j.logger.org.apache.hadoop.metrics2": "${hadoop.metrics.log.level}", 
+            "log4j.appender.DRFA.File": "${hadoop.log.dir}/${hadoop.log.file}", 
+            "log4j.appender.TLA.layout": "org.apache.log4j.PatternLayout", 
+            "hadoop.log.file": "hadoop.log", 
+            "hadoop.security.log.file": "SecurityAuth.audit", 
+            "log4j.appender.console.target": "System.err", 
+            "log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit": "${hdfs.audit.logger}", 
+            "hdfs.audit.logger": "INFO,console", 
+            "log4j.appender.RFAS.MaxBackupIndex": "${hadoop.security.log.maxbackupindex}", 
+            "log4j.appender.TLA.layout.ConversionPattern": "%d{ISO8601} %p %c: %m%n", 
+            "hadoop.tasklog.iscleanup": "false", 
+            "mapred.audit.logger": "INFO,console", 
+            "log4j.appender.DRFAAUDIT.layout": "org.apache.log4j.PatternLayout", 
+            "hadoop.tasklog.logsRetainHours": "12", 
+            "log4j.appender.MRAUDIT.File": "${hadoop.log.dir}/mapred-audit.log", 
+            "log4j.appender.TLA.totalLogFileSize": "${hadoop.tasklog.totalLogFileSize}", 
+            "log4j.appender.DRFA": "org.apache.log4j.DailyRollingFileAppender", 
+            "log4j.category.SecurityLogger": "${hadoop.security.logger}", 
+            "hadoop.tasklog.totalLogFileSize": "100", 
+            "log4j.appender.RFA.MaxFileSize": "256MB", 
+            "log4j.appender.RFAS": "org.apache.log4j.RollingFileAppender", 
+            "log4j.appender.RFA": "org.apache.log4j.RollingFileAppender", 
+            "log4j.appender.RFA.layout.ConversionPattern": "%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n", 
+            "log4j.appender.DRFAS.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.threshhold": "ALL", 
+            "log4j.appender.TLA.isCleanup": "${hadoop.tasklog.iscleanup}", 
+            "log4j.appender.TLA.taskId": "${hadoop.tasklog.taskid}", 
+            "log4j.appender.console.layout.ConversionPattern": "%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n", 
+            "log4j.appender.MRAUDIT.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.appender.console": "org.apache.log4j.ConsoleAppender", 
+            "hadoop.log.dir": ".", 
+            "hadoop.security.log.maxfilesize": "256MB", 
+            "hadoop.metrics.log.level": "INFO", 
+            "log4j.appender.RFA.MaxBackupIndex": "10", 
+            "log4j.rootLogger": "${hadoop.root.logger}, EventCounter", 
+            "log4j.appender.RFAS.File": "${hadoop.log.dir}/${hadoop.security.log.file}", 
+            "log4j.logger.org.apache.hadoop.mapred.AuditLogger": "${mapred.audit.logger}", 
+            "hadoop.tasklog.purgeLogSplits": "true", 
+            "log4j.appender.DRFAS.layout.ConversionPattern": "%d{ISO8601} %p %c: %m%n"
+        }, 
+        "zookeeper-log4j": {
+            "log4j.appender.CONSOLE.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.appender.TRACEFILE.layout.ConversionPattern": "%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n", 
+            "log4j.appender.CONSOLE.layout.ConversionPattern": "%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n", 
+            "log4j.appender.ROLLINGFILE": "org.apache.log4j.RollingFileAppender", 
+            "log4j.appender.CONSOLE.Threshold": "INFO", 
+            "log4j.appender.CONSOLE": "org.apache.log4j.ConsoleAppender", 
+            "log4j.appender.ROLLINGFILE.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.appender.TRACEFILE.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.appender.TRACEFILE.Threshold": "TRACE", 
+            "log4j.appender.ROLLINGFILE.layout.ConversionPattern": "%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n", 
+            "log4j.appender.TRACEFILE": "org.apache.log4j.FileAppender", 
+            "log4j.appender.TRACEFILE.File": "zookeeper_trace.log", 
+            "log4j.appender.ROLLINGFILE.File": "zookeeper.log", 
+            "log4j.appender.ROLLINGFILE.MaxFileSize": "10MB", 
+            "log4j.appender.ROLLINGFILE.Threshold": "DEBUG"
+        }, 
+        "yarn-site": {
+            "yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25", 
+            "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor", 
+            "yarn.nodemanager.local-dirs": "/hadoop/yarn/local", 
+            "yarn.resourcemanager.resource-tracker.address": "c6402.ambari.apache.org:8025", 
+            "yarn.nodemanager.remote-app-log-dir-suffix": "logs", 
+            "yarn.resourcemanager.hostname": "c6402.ambari.apache.org", 
+            "yarn.nodemanager.health-checker.script.timeout-ms": "60000", 
+            "yarn.resourcemanager.scheduler.class": "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler", 
+            "yarn.nodemanager.resource.memory-mb": "2048", 
+            "yarn.scheduler.minimum-allocation-mb": "683", 
+            "yarn.resourcemanager.address": "c6402.ambari.apache.org:8050", 
+            "yarn.resourcemanager.scheduler.address": "c6402.ambari.apache.org:8030", 
+            "yarn.log-aggregation.retain-seconds": "2592000", 
+            "yarn.scheduler.maximum-allocation-mb": "2048", 
+            "yarn.log-aggregation-enable": "true", 
+            "yarn.nodemanager.address": "0.0.0.0:45454", 
+            "yarn.nodemanager.container-monitor.interval-ms": "3000", 
+            "yarn.nodemanager.log-aggregation.compression-type": "gz", 
+            "yarn.nodemanager.log.retain-second": "604800", 
+            "yarn.nodemanager.delete.debug-delay-sec": "0", 
+            "yarn.nodemanager.log-dirs": "/hadoop/yarn/log", 
+            "yarn.nodemanager.health-checker.interval-ms": "135000", 
+            "yarn.resourcemanager.am.max-attempts": "2", 
+            "yarn.nodemanager.remote-app-log-dir": "/app-logs", 
+            "yarn.nodemanager.admin-env": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX", 
+            "yarn.nodemanager.aux-services": "mapreduce_shuffle", 
+            "yarn.nodemanager.vmem-check-enabled": "false", 
+            "yarn.nodemanager.vmem-pmem-ratio": "2.1", 
+            "yarn.admin.acl": "*", 
+            "yarn.resourcemanager.webapp.address": "c6402.ambari.apache.org:8088", 
+            "yarn.resourcemanager.nodes.exclude-path": "/etc/hadoop/conf/yarn.exclude", 
+            "yarn.nodemanager.linux-container-executor.group": "hadoop", 
+            "yarn.acl.enable": "true", 
+            "yarn.log.server.url": "http://c6402.ambari.apache.org:19888/jobhistory/logs", 
+            "yarn.application.classpath": "/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*", 
+            "yarn.resourcemanager.admin.address": "c6402.ambari.apache.org:8141", 
+            "yarn.nodemanager.aux-services.mapreduce_shuffle.class": "org.apache.hadoop.mapred.ShuffleHandler"
+        },
+        "yarn-env": {
+            "yarn_pid_dir_prefix": "/var/run/hadoop-yarn", 
+            "apptimelineserver_heapsize": "1024", 
+            "nodemanager_heapsize": "1024", 
+            "content": "\nexport HADOOP_YARN_HOME={{hadoop_yarn_home}}\nexport YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER\nexport YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\nexport JAVA_HOME={{java64_home}}\n\n# User for YARN daemons\nexport HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}\n\n# resolve links - $0 may be a softlink\nexport YARN_CONF_DIR=\"${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}\"\n\n# some Java parameters\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\nif [ \"$JAVA_HOME\" != \"\" ]; then\n  #echo \"run java in $JAVA_HOME\"\n  JAVA_HOME=$JAVA_HOME\nfi\n\nif [ \"$JAVA_HOME\" = \"\" ]; then\n  echo \"Error: JAVA_HOME is not set.\"\n  exit 1\nfi\n\nJAVA=$JAVA_HOME/bin/java\nJAVA_HEAP_MAX=-Xmx1000m\n\n# For setting YARN specific HEAP sizes please use this\n# Parameter and set appropriately\nYARN_HEAPSIZE={{yarn_heapsize}}\n\n# check envvars which might override default args\nif [ \"$YARN_HEAPSIZE\" != \"\" ]; then\n  JAVA_HEAP_M
 AX=\"-Xmx\"\"$YARN_HEAPSIZE\"\"m\"\nfi\n\n# Resource Manager specific parameters\n\n# Specify the max Heapsize for the ResourceManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_RESOURCEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}\n\n# Specify the JVM options to be used when starting the ResourceManager.\n# These options will be appended to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_RESOURCEMANAGER_OPTS=\n\n# Node Manager specific parameters\n\n# Specify the max Heapsize for the NodeManager using a numerical value\n# in the scale of MB. For example, to 
 specify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_NODEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}\n\n# Specify the max Heapsize for the HistoryManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1024.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_HISTORYSERVER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}\n\n# Specify the JVM options to be used when starting the NodeManager.\n# These options will be
  appended to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_NODEMANAGER_OPTS=\n\n# so that filenames w/ spaces are handled correctly in loops below\nIFS=\n\n\n# default log directory and file\nif [ \"$YARN_LOG_DIR\" = \"\" ]; then\n  YARN_LOG_DIR=\"$HADOOP_YARN_HOME/logs\"\nfi\nif [ \"$YARN_LOGFILE\" = \"\" ]; then\n  YARN_LOGFILE='yarn.log'\nfi\n\n# default policy file for service-level authorization\nif [ \"$YARN_POLICYFILE\" = \"\" ]; then\n  YARN_POLICYFILE=\"hadoop-policy.xml\"\nfi\n\n# restore ordinary behaviour\nunset IFS\n\n\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.root.logger=$
 {YARN_ROOT_LOGGER:-INFO,console}\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}\"\nif [ \"x$JAVA_LIBRARY_PATH\" != \"x\" ]; then\n  YARN_OPTS=\"$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH\"\nfi\nYARN_OPTS=\"$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE\"", 
+            "yarn_heapsize": "1024", 
+            "yarn_user": "yarn", 
+            "resourcemanager_heapsize": "1024", 
+            "yarn_log_dir_prefix": "/var/log/hadoop-yarn",
+            "min_user_id": "1000"
+        },
+        "cluster-env": {
+            "security_enabled": "false",
+            "ignore_groupsusers_create": "false",
+            "smokeuser": "ambari-qa",
+            "kerberos_domain": "EXAMPLE.COM",
+            "user_group": "hadoop"
+        },
+        "hadoop-env": {
+            "namenode_opt_maxnewsize": "200m",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
+            "namenode_heapsize": "1024m", 
+            "namenode_opt_newsize": "200m",
+            "namenode_opt_permsize" : "128m",
+            "namenode_opt_maxpermsize" : "256m",
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop Configuration Directory\n#TODO: if env var set that can cause problems\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by defaul
 t.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}
 }/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/
 $USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HAD
 OOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n#
  The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ -d \"/usr/lib/tez\" ]; then\n  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64", 
+            "hdfs_user": "hdfs",
+            "dtnode_heapsize": "1024m", 
+            "proxyuser_group": "users",
+            "hadoop_heapsize": "1024", 
+            "hadoop_pid_dir_prefix": "/var/run/hadoop",
+            "dfs_ha_initial_namenode_active" : "c6401.ambari.apache.org",
+            "dfs_ha_initial_namenode_standby" : "c6402.ambari.apache.org"
+        },
+        "hive-env": {
+            "hcat_pid_dir": "/var/run/webhcat", 
+            "hcat_user": "hcat", 
+            "hive_ambari_database": "MySQL", 
+            "hive_hostname": "abtest-3.c.pramod-thangali.internal", 
+            "hive_metastore_port": "9083", 
+            "webhcat_user": "hcat", 
+            "content": "\n if [ \"$SERVICE\" = \"cli\" ]; then\n   if [ -z \"$DEBUG\" ]; then\n     export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit\"\n   else\n     export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit\"\n   fi\n fi\n\n# The heap size of the jvm stared by hive shell script can be controlled via:\n\nexport HADOOP_HEAPSIZE=\"{{hive_heapsize}}\"\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n\n# Larger heap size may be required when running queries over large number of files or partitions.\n# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be\n# appropriate for hive server (hwi etc).\n\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hive Configuration Director
 y can be controlled by:\nexport HIVE_CONF_DIR={{conf_dir}}\n\n# Folder containing extra ibraries required for hive compilation/execution can be controlled by:\nif [ \"${HIVE_AUX_JARS_PATH}\" != \"\" ]; then\n  export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\nelif [ -d \"/usr/lib/hive-hcatalog/\" ]; then\n  export HIVE_AUX_JARS_PATH=/usr/lib/hive-hcatalog/share/hcatalog/hive-hcatalog-core-*.jar\nelse\n  export HIVE_AUX_JARS_PATH=/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar\nfi\nexport METASTORE_PORT={{hive_metastore_port}}", 
+            "hive_database_name": "hive", 
+            "hive_database_type": "mysql", 
+            "hive_pid_dir": "/var/run/hive", 
+            "hive_log_dir": "/var/log/hive", 
+            "hive_user": "hive", 
+            "hcat_log_dir": "/var/log/webhcat", 
+            "hive_database": "New MySQL Database"
+        },
+        "hbase-env": {
+            "hbase_pid_dir": "/var/run/hbase", 
+            "hbase_user": "hbase", 
+            "hbase_master_heapsize": "1024m", 
+            "content": "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n# The maximum amount of heap to use, in MB. Default is 1000.\n# export HBASE_HEAPSIZE=1000\n\n# Extra Java runtime options.\n# Below are what we set by default. May only work with SUN JVM.\n# For more on why as well as other possible settings,\n# see http://wiki.apache.org/hadoop/PerformanceTuning\nexport HBASE_OPTS=\"-XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log\"\nexport SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintG
 CDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable JMX exporting\n# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\nexport HBASE_MASTER_OPTS=\"-Xmx{{master_heapsize}}\"\nexport HBASE_REGIONSERVER_OPTS=\"-Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# 
 Extra ssh options. Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by default.\nexport HBASE_LOG_DIR={{log_dir}}\n\n# A string representing this instance of hbase. $USER by default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The directory where pid files are stored. /tmp by default.\nexport HBASE_PID_DIR={{pid_dir}}\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}}\"\nexport HBASE_MASTER_OPTS=\
 "$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}}\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n{% endif %}", 
+            "hbase_regionserver_heapsize": "1024m",
+            "hbase_regionserver_xmn_max": "512",
+            "hbase_regionserver_xmn_ratio": "0.2",
+            "hbase_log_dir": "/var/log/hbase"
+        },
+        "ganglia-env": {
+            "gmond_user": "nobody", 
+            "ganglia_runtime_dir": "/var/run/ganglia/hdp", 
+            "rrdcached_base_dir": "/var/lib/ganglia/rrds", 
+            "rrdcached_flush_timeout": "7200", 
+            "gmetad_user": "nobody", 
+            "rrdcached_write_threads": "4", 
+            "rrdcached_delay": "1800", 
+            "rrdcached_timeout": "3600"
+        },
+        "zookeeper-env": {
+            "clientPort": "2181", 
+            "zk_user": "zookeeper", 
+            "zk_log_dir": "/var/log/zookeeper", 
+            "syncLimit": "5", 
+            "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}", 
+            "initLimit": "10", 
+            "zk_pid_dir": "/var/run/zookeeper", 
+            "zk_data_dir": "/hadoop/zookeeper", 
+            "tickTime": "2000"
+        },
+        "mapred-env": {
+            "content": "\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\n\nexport HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}\n\nexport HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA\n\n#export HADOOP_JOB_HISTORYSERVER_OPTS=\n#export HADOOP_MAPRED_LOG_DIR=\"\" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.\n#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.\n#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.\n#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default\n#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.", 
+            "mapred_pid_dir_prefix": "/var/run/hadoop-mapreduce", 
+            "mapred_user": "mapred", 
+            "jobhistory_heapsize": "900", 
+            "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce"
+        },
+        "tez-env": {
+            "content": "\n# Tez specific configuration\nexport TEZ_CONF_DIR={{config_dir}}\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}", 
+            "tez_user": "tez"
+        }, 
+        "storm-env": {
+            "content": "\n#!/bin/bash\n\n# Set Storm specific environment variables here.\n\n# The java implementation to use.\nexport JAVA_HOME={{java_home}}\n\n# export STORM_CONF_DIR=\"\"", 
+            "storm_log_dir": "/var/log/storm", 
+            "storm_pid_dir": "/var/run/storm", 
+            "storm_user": "storm"
+        }, 
+        "falcon-env": {
+            "falcon_port": "15000", 
+            "falcon_pid_dir": "/var/run/falcon", 
+            "falcon_log_dir": "/var/log/falcon", 
+            "falcon.emeddedmq.port": "61616", 
+            "falcon_user": "falcon", 
+            "falcon_local_dir": "/hadoop/falcon", 
+            "content": "\n# The java implementation to use. If JAVA_HOME is not found we expect java and jar to be in path\nexport JAVA_HOME={{java_home}}\n\n# any additional java opts you want to set. This will apply to both client and server operations\n#export FALCON_OPTS=\n\n# any additional java opts that you want to set for client only\n#export FALCON_CLIENT_OPTS=\n\n# java heap size we want to set for the client. Default is 1024MB\n#export FALCON_CLIENT_HEAP=\n\n# any additional opts you want to set for prisim service.\n#export FALCON_PRISM_OPTS=\n\n# java heap size we want to set for the prisim service. Default is 1024MB\n#export FALCON_PRISM_HEAP=\n\n# any additional opts you want to set for falcon service.\nexport FALCON_SERVER_OPTS=\"-Dfalcon.embeddedmq={{falcon_embeddedmq_enabled}} -Dfalcon.emeddedmq.port={{falcon_emeddedmq_port}}\"\n\n# java heap size we want to set for the falcon server. Default is 1024MB\n#export FALCON_SERVER_HEAP=\n\n# What is is considered as falco
 n home dir. Default is the base locaion of the installed software\n#export FALCON_HOME_DIR=\n\n# Where log files are stored. Defatult is logs directory under the base install location\nexport FALCON_LOG_DIR={{falcon_log_dir}}\n\n# Where pid files are stored. Defatult is logs directory under the base install location\nexport FALCON_PID_DIR={{falcon_pid_dir}}\n\n# where the falcon active mq data is stored. Defatult is logs/data directory under the base install location\nexport FALCON_DATA_DIR={{falcon_embeddedmq_data}}\n\n# Where do you want to expand the war file. By Default it is in /server/webapp dir under the base install dir.\n#export FALCON_EXPANDED_WEBAPP_DIR=", 
+            "falcon.embeddedmq.data": "/hadoop/falcon/embeddedmq/data", 
+            "falcon.embeddedmq": "true", 
+            "falcon_store_uri": "file:///hadoop/falcon/store"
+        }, 
+        "oozie-env": {
+            "oozie_derby_database": "Derby", 
+            "oozie_admin_port": "11001", 
+            "oozie_hostname": "abtest-3.c.pramod-thangali.internal", 
+            "oozie_pid_dir": "/var/run/oozie", 
+            "content": "\n#!/bin/bash\n\nif [ -d \"/usr/lib/bigtop-tomcat\" ]; then\n  export OOZIE_CONFIG=${OOZIE_CONFIG:-/etc/oozie/conf}\n  export CATALINA_BASE=${CATALINA_BASE:-/var/lib/oozie/oozie-server}\n  export CATALINA_TMPDIR=${CATALINA_TMPDIR:-/var/tmp/oozie}\n  export OOZIE_CATALINA_HOME=/usr/lib/bigtop-tomcat\nfi\n\n#Set JAVA HOME\nexport JAVA_HOME={{java_home}}\n\nexport JRE_HOME=${JAVA_HOME}\n\n# Set Oozie specific environment variables here.\n\n# Settings for the Embedded Tomcat that runs Oozie\n# Java System properties for Oozie should be specified in this variable\n#\n# export CATALINA_OPTS=\n\n# Oozie configuration file to load from Oozie configuration directory\n#\n# export OOZIE_CONFIG_FILE=oozie-site.xml\n\n# Oozie logs directory\n#\nexport OOZIE_LOG={{oozie_log_dir}}\n\n# Oozie pid directory\n#\nexport CATALINA_PID={{pid_file}}\n\n#Location of the data for oozie\nexport OOZIE_DATA={{oozie_data_dir}}\n\n# Oozie Log4J configuration file to load from Oozie config
 uration directory\n#\n# export OOZIE_LOG4J_FILE=oozie-log4j.properties\n\n# Reload interval of the Log4J configuration file, in seconds\n#\n# export OOZIE_LOG4J_RELOAD=10\n\n# The port Oozie server runs\n#\nexport OOZIE_HTTP_PORT={{oozie_server_port}}\n\n# The admin port Oozie server runs\n#\nexport OOZIE_ADMIN_PORT={{oozie_server_admin_port}}\n\n# The host name Oozie server runs on\n#\n# export OOZIE_HTTP_HOSTNAME=`hostname -f`\n\n# The base URL for callback URLs to Oozie\n#\n# export OOZIE_BASE_URL=\"http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie\"\nexport JAVA_LIBRARY_PATH=/usr/lib/hadoop/lib/native/Linux-amd64-64", 
+            "oozie_user": "oozie", 
+            "oozie_database": "New Derby Database", 
+            "oozie_data_dir": "/hadoop/oozie/data", 
+            "oozie_log_dir": "/var/log/oozie"
+        }, 
+        "webhcat-env": {
+            "content": "\n# The file containing the running pid\nPID_FILE={{pid_file}}\n\nTEMPLETON_LOG_DIR={{templeton_log_dir}}/\n\n\nWEBHCAT_LOG_DIR={{templeton_log_dir}}/\n\n# The console error log\nERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log\n\n# The console log\nCONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log\n\n#TEMPLETON_JAR=templeton_jar_name\n\n#HADOOP_PREFIX=hadoop_prefix\n\n#HCAT_PREFIX=hive_prefix\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nexport HADOOP_HOME=/usr/lib/hadoop"
+        }, 
+        "pig-env": {
+            "content": "\nJAVA_HOME={{java64_home}}\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\nif [ -d \"/usr/lib/tez\" ]; then\n  PIG_OPTS=\"$PIG_OPTS -Dmapreduce.framework.name=yarn\"\nfi"
+        }, 
+        "sqoop-env": {
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n#Set path to where bin/hadoop is available\n#Set path to where bin/hadoop is available\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n#set the path to where bin/hbase is available\nexport HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}\n\n#Set the path to where bin/hive is available\nexport HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}\n\n#Set the path for where zookeper config dir is\nexport ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}\n\n# add libthrift in hive to sqoop class path first so hive imports work\nexport SQOOP_USER_CLASSPATH=\"`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}\"",
+            "sqoop_user": "sqoop"
+        }
+    },
+    "configuration_attributes": {
+      "mapred-site": {
+        "final": {
+          "mapred.healthChecker.script.path": "true",
+          "mapreduce.jobtracker.staging.root.dir": "true"
+        }
+      },
+      "oozie-site": {
+        "final": {
+          "oozie.service.PurgeService.purge.interval": "true",
+          "oozie.service.CallableQueueService.queue.size": "true"
+        }
+      },
+      "webhcat-site": {
+        "final": {
+          "templeton.pig.path": "true",
+          "templeton.exec.timeout": "true",
+          "templeton.override.enabled": "true"
+        }
+      },
+      "hdfs-site": {
+        "final": {
+          "dfs.web.ugi": "true",
+          "dfs.support.append": "true",
+          "dfs.cluster.administrators": "true"
+        }
+      },
+      "hbase-site": {
+        "final": {
+          "hbase.client.keyvalue.maxsize": "true",
+          "hbase.hstore.compactionThreshold": "true",
+          "hbase.rootdir": "true"
+        }
+      },
+      "core-site": {
+        "final": {
+          "hadoop.proxyuser.hive.groups": "true",
+          "webinterface.private.actions": "true",
+          "hadoop.proxyuser.oozie.hosts": "true"
+        }
+      },
+      "hive-site": {
+        "final": {
+          "javax.jdo.option.ConnectionPassword": "true",
+          "javax.jdo.option.ConnectionDriverName": "true",
+          "hive.optimize.bucketmapjoin.sortedmerge": "true"
+        }
+      }
+    },
+    "configurationTags": {
+        "capacity-scheduler": {
+            "tag": "version1"
+        }, 
+        "global": {
+            "tag": "version1"
+        }, 
+        "mapred-site": {
+            "tag": "version1"
+        }, 
+        "hdfs-site": {
+            "tag": "version1392401108182"
+        }, 
+        "yarn-log4j": {
+            "tag": "version1"
+        }, 
+        "core-site": {
+            "tag": "version1392401108196"
+        }, 
+        "hdfs-log4j": {
+            "tag": "version1"
+        }, 
+        "zookeeper-log4j": {
+            "tag": "version1"
+        }, 
+        "yarn-site": {
+            "tag": "version1"
+        }
+    }, 
+    "commandId": "23-4", 
+    "clusterHostInfo": {
+        "ambari_server_host": [
+            "c6401.ambari.apache.org"
+        ],
+        "nm_hosts": [
+            "c6403.ambari.apache.org", 
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "zkfc_hosts": [
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "all_ping_ports": [
+            "8670", 
+            "8670", 
+            "8670"
+        ], 
+        "journalnode_hosts": [
+            "c6403.ambari.apache.org", 
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "rm_host": [
+            "c6402.ambari.apache.org"
+        ], 
+        "all_hosts": [
+            "c6403.ambari.apache.org", 
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "slave_hosts": [
+            "c6403.ambari.apache.org", 
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "namenode_host": [
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "zookeeper_hosts": [
+            "c6403.ambari.apache.org", 
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "hs_host": [
+            "c6402.ambari.apache.org"
+        ]
+    }
+}


[3/3] ambari git commit: AMBARI-8771. Add support for deploying HDFS NameNode HA Clusters with Blueprints. (rnettleton)

Posted by rn...@apache.org.
AMBARI-8771. Add support for deploying HDFS NameNode HA Clusters with Blueprints. (rnettleton)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4320de6e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4320de6e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4320de6e

Branch: refs/heads/trunk
Commit: 4320de6eeffda783e1c6592d80bf11f6fbfff31b
Parents: 4f919d2
Author: Bob Nettleton <rn...@hortonworks.com>
Authored: Mon Dec 22 12:31:24 2014 -0500
Committer: Bob Nettleton <rn...@hortonworks.com>
Committed: Mon Dec 22 12:32:09 2014 -0500

----------------------------------------------------------------------
 .../internal/BaseBlueprintProcessor.java        |   7 +
 .../BlueprintConfigurationProcessor.java        | 147 ++++-
 .../2.1.0.2.0/package/scripts/hdfs_namenode.py  |  26 +
 .../HDFS/2.1.0.2.0/package/scripts/params.py    |   5 +
 .../2.1.0.2.0/package/scripts/zkfc_slave.py     |   8 +
 .../internal/BaseBlueprintProcessorTest.java    | 240 +++++++
 .../BlueprintConfigurationProcessorTest.java    | 165 ++++-
 .../python/stacks/2.0.6/HDFS/test_namenode.py   | 178 ++++++
 .../test/python/stacks/2.0.6/HDFS/test_zkfc.py  | 133 ++++
 .../2.0.6/configs/ha_bootstrap_active_node.json | 618 +++++++++++++++++++
 .../configs/ha_bootstrap_standby_node.json      | 618 +++++++++++++++++++
 11 files changed, 2138 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
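
The change detects NameNode HA from the blueprint configuration (the presence of dfs.nameservices in hdfs-site) and, when dfs_ha_initial_namenode_active / dfs_ha_initial_namenode_standby are not already set in hadoop-env, assigns them from the hosts that carry the NAMENODE component. Below is a minimal standalone sketch of that flow; the class and method names are hypothetical rather than the actual Ambari classes, but the detection key and the hadoop-env property names match the test config above and the BlueprintConfigurationProcessor hunks further down.

    // Standalone sketch (hypothetical names, not the Ambari API) of the two ideas
    // in this patch: HA detection via dfs.nameservices, and auto-assignment of the
    // initial active/standby NameNodes when the blueprint does not specify them.
    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class NameNodeHaSketch {

        // NameNode HA is considered enabled when hdfs-site defines dfs.nameservices.
        static boolean isNameNodeHAEnabled(Map<String, Map<String, String>> properties) {
            return properties.containsKey("hdfs-site")
                && properties.get("hdfs-site").containsKey("dfs.nameservices");
        }

        // If neither initial NameNode role is set, pick them from the two NAMENODE hosts.
        static void assignInitialNameNodes(Map<String, Map<String, String>> properties,
                                           List<String> nameNodeHosts) {
            Map<String, String> hadoopEnv =
                properties.computeIfAbsent("hadoop-env", k -> new HashMap<>());
            if (!hadoopEnv.containsKey("dfs_ha_initial_namenode_active")
                && !hadoopEnv.containsKey("dfs_ha_initial_namenode_standby")
                && nameNodeHosts.size() == 2) {
                hadoopEnv.put("dfs_ha_initial_namenode_active", nameNodeHosts.get(0));
                hadoopEnv.put("dfs_ha_initial_namenode_standby", nameNodeHosts.get(1));
            }
        }

        public static void main(String[] args) {
            Map<String, Map<String, String>> properties = new HashMap<>();
            Map<String, String> hdfsSite = new HashMap<>();
            hdfsSite.put("dfs.nameservices", "ns1");
            hdfsSite.put("dfs.ha.namenodes.ns1", "nn1,nn2");
            properties.put("hdfs-site", hdfsSite);

            if (isNameNodeHAEnabled(properties)) {
                assignInitialNameNodes(properties,
                    Arrays.asList("c6401.ambari.apache.org", "c6402.ambari.apache.org"));
            }
            // Prints the same keys/values that appear in hadoop-env in the test config above.
            System.out.println(properties.get("hadoop-env"));
        }
    }
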


http://git-wip-us.apache.org/repos/asf/ambari/blob/4320de6e/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BaseBlueprintProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BaseBlueprintProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BaseBlueprintProcessor.java
index 9cfb635..c2ddad8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BaseBlueprintProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BaseBlueprintProcessor.java
@@ -366,6 +366,13 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP
 
     Collection<String> cardinalityFailures = new HashSet<String>();
 
+    if (BlueprintConfigurationProcessor.isNameNodeHAEnabled(clusterConfig) &&
+      (component.equals("SECONDARY_NAMENODE"))) {
+      // override the cardinality for this component in an HA deployment,
+      // since the SECONDARY_NAMENODE should not be started in this scenario
+      cardinality = new Cardinality("0");
+    }
+
     int actualCount = getHostGroupsForComponent(component, hostGroups).size();
     if (! cardinality.isValidCount(actualCount)) {
       boolean validated = ! isDependencyManaged(stack, component, clusterConfig);
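
As a standalone illustration of the rule this hunk introduces (not the Ambari implementation, which works with Cardinality and HostGroup objects), the Python sketch below applies the same override: once NameNode HA is detected from the cluster configuration, the expected SECONDARY_NAMENODE count is forced to zero before the normal cardinality check runs.

def is_namenode_ha_enabled(config):
    # mirrors isNameNodeHAEnabled(): HA is inferred from dfs.nameservices
    return "dfs.nameservices" in config.get("hdfs-site", {})

def secondary_namenode_cardinality(config, stack_cardinality="1"):
    # mirrors the override above: in an HA blueprint the SECONDARY_NAMENODE
    # must not be deployed, so its cardinality is overridden to "0"
    if is_namenode_ha_enabled(config):
        return "0"
    return stack_cardinality

print(secondary_namenode_cardinality({"hdfs-site": {"dfs.nameservices": "mycluster"}}))  # 0
print(secondary_namenode_cardinality({"hdfs-site": {}}))                                 # 1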

http://git-wip-us.apache.org/repos/asf/ambari/blob/4320de6e/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index c4071d4..807723e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -24,6 +24,7 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.LinkedHashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -134,6 +135,36 @@ public class BlueprintConfigurationProcessor {
         }
       }
     }
+
+    if (isNameNodeHAEnabled()) {
+      // if the active/standby namenodes are not specified, assign them automatically
+      if (! isNameNodeHAInitialActiveNodeSet(properties) && ! isNameNodeHAInitialStandbyNodeSet(properties)) {
+        Collection<HostGroup> listOfHostGroups = new LinkedList<HostGroup>();
+        for (String key : hostGroups.keySet()) {
+          listOfHostGroups.add(hostGroups.get(key));
+        }
+
+        Collection<HostGroup> hostGroupsContainingNameNode =
+          getHostGroupsForComponent("NAMENODE", listOfHostGroups);
+        // set the properties that configure which namenode is active,
+        // and which is a standby node in this HA deployment
+        Map<String, String> hadoopEnv = properties.get("hadoop-env");
+        if (hostGroupsContainingNameNode.size() == 2) {
+          List<HostGroup> listOfGroups = new LinkedList<HostGroup>(hostGroupsContainingNameNode);
+          hadoopEnv.put("dfs_ha_initial_namenode_active", listOfGroups.get(0).getHostInfo().iterator().next());
+          hadoopEnv.put("dfs_ha_initial_namenode_standby", listOfGroups.get(1).getHostInfo().iterator().next());
+        } else {
+          // handle the case where multiple hosts are mapped to an HA host group
+          if (hostGroupsContainingNameNode.size() == 1) {
+            List<String> listOfInfo = new LinkedList<String>(hostGroupsContainingNameNode.iterator().next().getHostInfo());
+            // there should only be two host names that can include a NameNode install/deployment
+            hadoopEnv.put("dfs_ha_initial_namenode_active", listOfInfo.get(0));
+            hadoopEnv.put("dfs_ha_initial_namenode_standby", listOfInfo.get(1));
+          }
+        }
+      }
+    }
+
     return properties;
   }
 
@@ -275,7 +306,45 @@ public class BlueprintConfigurationProcessor {
    *         false if NameNode HA is not enabled
    */
   boolean isNameNodeHAEnabled() {
-    return properties.containsKey("hdfs-site") && properties.get("hdfs-site").containsKey("dfs.nameservices");
+    return isNameNodeHAEnabled(properties);
+  }
+
+  /**
+   * Static convenience function to determine if NameNode HA is enabled
+   * @param configProperties configuration properties for this cluster
+   * @return true if NameNode HA is enabled
+   *         false if NameNode HA is not enabled
+   */
+  static boolean isNameNodeHAEnabled(Map<String, Map<String, String>> configProperties) {
+    return configProperties.containsKey("hdfs-site") && configProperties.get("hdfs-site").containsKey("dfs.nameservices");
+  }
+
+
+  /**
+   * Convenience method to examine the current configuration, to determine
+   * if the hostname of the initial active namenode in an HA deployment has
+   * been included.
+   *
+   * @param configProperties the configuration for this cluster
+   * @return true if the initial active namenode property has been configured
+   *         false if the initial active namenode property has not been configured
+   */
+  static boolean isNameNodeHAInitialActiveNodeSet(Map<String, Map<String, String>> configProperties) {
+    return configProperties.containsKey("hadoop-env") && configProperties.get("hadoop-env").containsKey("dfs_ha_initial_namenode_active");
+  }
+
+
+  /**
+   * Convenience method to examine the current configuration, to determine
+   * if the hostname of the initial standby namenode in an HA deployment has
+   * been included.
+   *
+   * @param configProperties the configuration for this cluster
+   * @return true if the initial standby namenode property has been configured
+   *         false if the initial standby namenode property has not been configured
+   */
+  static boolean isNameNodeHAInitialStandbyNodeSet(Map<String, Map<String, String>> configProperties) {
+    return configProperties.containsKey("hadoop-env") && configProperties.get("hadoop-env").containsKey("dfs_ha_initial_namenode_standby");
   }
 
 
@@ -456,7 +525,7 @@ public class BlueprintConfigurationProcessor {
   private static Collection<HostGroup> getHostGroupsForComponent(String component,
                                                                  Collection<? extends HostGroup> hostGroups) {
 
-    Collection<HostGroup> resultGroups = new HashSet<HostGroup>();
+    Collection<HostGroup> resultGroups = new LinkedHashSet<HostGroup>();
     for (HostGroup group : hostGroups ) {
       if (group.getComponents().contains(component)) {
         resultGroups.add(group);
@@ -476,7 +545,7 @@ public class BlueprintConfigurationProcessor {
   private static Collection<String> getHostStrings(Map<String, ? extends HostGroup> hostGroups,
                                                    String val) {
 
-    Collection<String> hosts = new HashSet<String>();
+    Collection<String> hosts = new LinkedHashSet<String>();
     Matcher m = HOSTGROUP_PORT_REGEX.matcher(val);
     while (m.find()) {
       String groupName = m.group(1);
@@ -593,6 +662,15 @@ public class BlueprintConfigurationProcessor {
           if (matchingGroups.isEmpty() && cardinality.isValidCount(0)) {
             return origValue;
           } else {
+            if (isNameNodeHAEnabled(properties) && isComponentNameNode() && (matchingGroups.size() == 2)) {
+              // if this is the defaultFS property, it should reflect the nameservice name,
+              // rather than a hostname (used in non-HA scenarios)
+              if (properties.get("core-site").get("fs.defaultFS").equals(origValue)) {
+                return origValue;
+              }
+
+            }
+
             throw new IllegalArgumentException("Unable to update configuration property with topology information. " +
               "Component '" + component + "' is not mapped to any host group or is mapped to multiple groups.");
           }
@@ -601,6 +679,17 @@ public class BlueprintConfigurationProcessor {
     }
 
     /**
+     * Utility method to determine if the component associated with this updater
+     * instance is an HDFS NameNode
+     *
+     * @return true if the component associated is a NameNode
+     *         false if the component is not a NameNode
+     */
+    private boolean isComponentNameNode() {
+      return component.equals("NAMENODE");
+    }
+
+    /**
      * Provides access to the name of the component associated
      *   with this updater instance.
      *
@@ -684,15 +773,19 @@ public class BlueprintConfigurationProcessor {
    * value with the host names which runs the associated component in the new cluster.
    */
   private static class MultipleHostTopologyUpdater implements PropertyUpdater {
+
+
+    private static final Character DEFAULT_SEPARATOR = ',';
+
     /**
      * Component name
      */
-    private String component;
+    private final String component;
 
     /**
      * Separator for multiple property values
      */
-    private Character separator = ',';
+    private final Character separator;
 
     /**
      * Constructor.
@@ -700,7 +793,19 @@ public class BlueprintConfigurationProcessor {
      * @param component  component name associated with the property
      */
     public MultipleHostTopologyUpdater(String component) {
+      this(component, DEFAULT_SEPARATOR);
+    }
+
+    /**
+     * Constructor
+     *
+     * @param component component name associated with this property
+     * @param separator the separator character to use when multiple hosts
+     *                  are specified in a property or URL
+     */
+    public MultipleHostTopologyUpdater(String component, Character separator) {
       this.component = component;
+      this.separator = separator;
     }
 
     /**
@@ -741,6 +846,30 @@ public class BlueprintConfigurationProcessor {
       }
 
       StringBuilder sb = new StringBuilder();
+      String suffix = null;
+      // parse out prefix if one exists
+      Matcher matcher = HOSTGROUP_PORT_REGEX.matcher(origValue);
+      if (matcher.find()) {
+        int indexOfStart = matcher.start();
+        // handle the case of a YAML config property
+        if ((indexOfStart > 0) && (!origValue.substring(0, indexOfStart).equals("['"))) {
+          // append prefix before adding host names
+          sb.append(origValue.substring(0, indexOfStart));
+        }
+
+        // parse out suffix if one exists
+        int indexOfEnd = matcher.end();
+        while (matcher.find()) {
+          indexOfEnd = matcher.end();
+        }
+
+        if (indexOfEnd < (origValue.length() - 1)) {
+          suffix = origValue.substring(indexOfEnd);
+        }
+
+      }
+
+      // add hosts to property, using the specified separator
       boolean firstHost = true;
       for (String host : hostStrings) {
         if (!firstHost) {
@@ -751,6 +880,11 @@ public class BlueprintConfigurationProcessor {
         sb.append(host);
       }
 
+      if ((suffix != null) && (!suffix.equals("']"))) {
+        sb.append(suffix);
+      }
+
+
       return sb.toString();
     }
   }
@@ -961,7 +1095,8 @@ public class BlueprintConfigurationProcessor {
     hdfsSiteMap.put("dfs.namenode.https-address", new SingleHostTopologyUpdater("NAMENODE"));
     coreSiteMap.put("fs.defaultFS", new SingleHostTopologyUpdater("NAMENODE"));
     hbaseSiteMap.put("hbase.rootdir", new SingleHostTopologyUpdater("NAMENODE"));
-    multiHdfsSiteMap.put("dfs.namenode.shared.edits.dir", new MultipleHostTopologyUpdater("JOURNALNODE"));
+    // HDFS shared.edits JournalNode Quorum URL uses semi-colons as separators
+    multiHdfsSiteMap.put("dfs.namenode.shared.edits.dir", new MultipleHostTopologyUpdater("JOURNALNODE", ';'));
 
     // SECONDARY_NAMENODE
     hdfsSiteMap.put("dfs.secondary.http.address", new SingleHostTopologyUpdater("SECONDARY_NAMENODE"));
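
The most intricate part of this file's changes is the updated MultipleHostTopologyUpdater, which now keeps any non-host prefix (such as "qjournal://") and suffix (such as "/mycluster") of a property value intact while joining the resolved hosts with a configurable separator, so that dfs.namenode.shared.edits.dir survives the round trip. The plain Python sketch below reproduces that idea; it assumes the exported token format %HOSTGROUP::name%:port, which HOSTGROUP_PORT_REGEX matches but which is not spelled out in this diff, and it omits the YAML "['...']" special case handled by the real code.

import re

# assumed token format; the real pattern lives in HOSTGROUP_PORT_REGEX
HOSTGROUP_TOKEN = re.compile(r"%HOSTGROUP::([^%]+)%:?(\d+)?")

def update_multi_host_value(orig_value, hostgroup_to_hosts, separator=","):
    """Replace host group tokens with real host(:port) pairs, preserving any
    prefix before the first token and any suffix after the last one."""
    matches = list(HOSTGROUP_TOKEN.finditer(orig_value))
    if not matches:
        return orig_value
    prefix = orig_value[:matches[0].start()]
    suffix = orig_value[matches[-1].end():]
    resolved = []
    for m in matches:
        group_name, port = m.group(1), m.group(2)
        for host in hostgroup_to_hosts[group_name]:
            resolved.append(host + (":" + port if port else ""))
    return prefix + separator.join(resolved) + suffix

value = ("qjournal://%HOSTGROUP::host_group_1%:8485;"
         "%HOSTGROUP::host_group_2%:8485/mycluster")
hosts = {"host_group_1": ["c6401.ambari.apache.org"],
         "host_group_2": ["c6402.ambari.apache.org"]}
print(update_multi_host_value(value, hosts, separator=";"))
# qjournal://c6401.ambari.apache.org:8485;c6402.ambari.apache.org:8485/mycluster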

http://git-wip-us.apache.org/repos/asf/ambari/blob/4320de6e/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
index e8dbc59..2029aac 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
@@ -47,6 +47,15 @@ def namenode(action=None, do_format=True, rolling_restart=False, env=None):
               group=params.user_group
     )
 
+    if params.dfs_ha_enabled:
+      # if the current host is the standby NameNode in an HA deployment
+      if params.hostname == params.dfs_ha_namenode_standby:
+        # run the bootstrap command, to start the NameNode in standby mode
+        # this requires that the active NameNode is already up and running,
+        # so this execute should be re-tried upon failure, up to a timeout
+        Execute("hdfs namenode -bootstrapStandby",
+          user = params.hdfs_user, tries=50)
+
     options = "-rollingUpgrade started" if rolling_restart else ""
 
     if rolling_restart:    
@@ -169,6 +178,23 @@ def format_namenode(force=None):
       Directory(mark_dir,
         recursive = True
       )
+  else:
+    if params.dfs_ha_namenode_active is not None:
+      if params.hostname == params.dfs_ha_namenode_active:
+        # check and run the format command in the HA deployment scenario
+        # only format the "active" namenode in an HA deployment
+        File(format("{tmp_dir}/checkForFormat.sh"),
+             content=StaticFile("checkForFormat.sh"),
+             mode=0755)
+        Execute(format(
+          "{tmp_dir}/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} "
+          "{hadoop_bin_dir} {old_mark_dir} {mark_dir} {dfs_name_dir}"),
+                not_if=format("test -d {old_mark_dir} || test -d {mark_dir}"),
+                path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin"
+        )
+        Directory(mark_dir,
+                  recursive=True
+        )
 
 
 def decommission():
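
The comment in the hunk above explains why the bootstrap command is given tries=50: -bootstrapStandby can only succeed once the active NameNode is reachable. Outside the resource_management framework, the retry behaviour amounts to roughly the loop below (a hedged sketch using plain subprocess and a fixed sleep; the real script runs the command as the hdfs user through the Execute resource).

import subprocess
import time

def bootstrap_standby_namenode(tries=50, try_sleep=10):
    """Retry 'hdfs namenode -bootstrapStandby' until it succeeds or we give up."""
    for _ in range(tries):
        if subprocess.call(["hdfs", "namenode", "-bootstrapStandby"]) == 0:
            return True
        # the active NameNode is probably not serving yet; wait and retry
        time.sleep(try_sleep)
    return False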

http://git-wip-us.apache.org/repos/asf/ambari/blob/4320de6e/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
index c9c81bb..1ac4446 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
@@ -185,6 +185,11 @@ dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None
 dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
 dfs_ha_automatic_failover_enabled = default("/configurations/hdfs-site/dfs.ha.automatic-failover.enabled", False)
 
+# hostname of the active HDFS HA Namenode (only used when HA is enabled)
+dfs_ha_namenode_active = default("/configurations/hadoop-env/dfs_ha_initial_namenode_active", None)
+# hostname of the standby HDFS HA Namenode (only used when HA is enabled)
+dfs_ha_namenode_standby = default("/configurations/hadoop-env/dfs_ha_initial_namenode_standby", None)
+
 namenode_id = None
 namenode_rpc = None
 

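For readers unfamiliar with the default() helper used above, it resolves a "/configurations/..." path against the command JSON and returns the supplied fallback when any segment is missing, which is how the two new hadoop-env keys stay None on clusters that never set them. The sketch below is a plain-dict approximation of that lookup, not the resource_management implementation.

def lookup_with_default(command_json, path, fallback=None):
    """Walk a '/configurations/...' style path, returning fallback when absent."""
    node = command_json
    for segment in path.strip("/").split("/"):
        if not isinstance(node, dict) or segment not in node:
            return fallback
        node = node[segment]
    return node

command_json = {"configurations": {"hadoop-env": {
    "dfs_ha_initial_namenode_active": "c6401.ambari.apache.org"}}}
print(lookup_with_default(command_json,
      "/configurations/hadoop-env/dfs_ha_initial_namenode_active"))   # c6401.ambari.apache.org
print(lookup_with_default(command_json,
      "/configurations/hadoop-env/dfs_ha_initial_namenode_standby"))  # None
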
http://git-wip-us.apache.org/repos/asf/ambari/blob/4320de6e/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py
index ee8b418..4102b69 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py
@@ -41,6 +41,14 @@ class ZkfcSlave(Script):
               owner=params.hdfs_user,
               group=params.user_group
     )
+
+    # format the znode for this HA setup
+    # only run this format command if the active namenode hostname is set
+    # The Ambari UI HA Wizard prompts the user to run this command
+    # manually, so this guarantees it is only run in the Blueprints case
+    if params.dfs_ha_enabled and params.dfs_ha_namenode_active is not None:
+        Execute("hdfs zkfc -formatZK -force -nonInteractive", user=params.hdfs_user)
+
     utils.service(
       action="start", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
       create_log_dir=True
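
The guard added here keeps UI-driven HA installs from re-running the znode format (the HA Wizard formats it manually and never sets the hadoop-env property), while Blueprint installs run it exactly once before zkfc starts. A minimal sketch of that condition, for reference only:

def should_format_zkfc_znode(dfs_ha_enabled, dfs_ha_namenode_active):
    # format only when HA is enabled AND the Blueprint processor populated
    # the initial active NameNode hostname; the UI Wizard leaves it unset
    return dfs_ha_enabled and dfs_ha_namenode_active is not None

print(should_format_zkfc_znode(True, "c6401.ambari.apache.org"))  # True (Blueprint HA)
print(should_format_zkfc_znode(True, None))                       # False (UI Wizard HA)
print(should_format_zkfc_znode(False, None))                      # False (no HA)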

http://git-wip-us.apache.org/repos/asf/ambari/blob/4320de6e/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BaseBlueprintProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BaseBlueprintProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BaseBlueprintProcessorTest.java
index 8540d8b..84225ac 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BaseBlueprintProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BaseBlueprintProcessorTest.java
@@ -3,6 +3,7 @@ package org.apache.ambari.server.controller.internal;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.isA;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 
 import java.util.Collection;
 import java.util.Collections;
@@ -10,9 +11,28 @@ import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.controller.StackConfigurationResponse;
+import org.apache.ambari.server.controller.StackServiceComponentResponse;
 import org.apache.ambari.server.controller.StackServiceResponse;
+import org.apache.ambari.server.controller.spi.NoSuchParentResourceException;
+import org.apache.ambari.server.controller.spi.NoSuchResourceException;
+import org.apache.ambari.server.controller.spi.Predicate;
+import org.apache.ambari.server.controller.spi.Request;
+import org.apache.ambari.server.controller.spi.RequestStatus;
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException;
+import org.apache.ambari.server.controller.spi.SystemException;
+import org.apache.ambari.server.controller.spi.UnsupportedPropertyException;
+import org.apache.ambari.server.orm.entities.BlueprintConfigEntity;
+import org.apache.ambari.server.orm.entities.BlueprintEntity;
+import org.apache.ambari.server.orm.entities.HostGroupComponentEntity;
+import org.apache.ambari.server.orm.entities.HostGroupConfigEntity;
+import org.apache.ambari.server.orm.entities.HostGroupEntity;
+import org.apache.ambari.server.state.ComponentInfo;
 import org.apache.ambari.server.state.DependencyInfo;
+import org.apache.ambari.server.state.ServiceInfo;
 import org.easymock.EasyMockSupport;
 import org.junit.Before;
 import org.junit.Test;
@@ -609,6 +629,226 @@ public class BaseBlueprintProcessorTest {
     mockSupport.verifyAll();
   }
 
+
+  @Test
+  public void testValidationOverrideForSecondaryNameNodeWithHA() throws Exception {
+    EasyMockSupport mockSupport = new EasyMockSupport();
+
+    AmbariManagementController mockController =
+      mockSupport.createMock(AmbariManagementController.class);
+
+    AmbariMetaInfo mockMetaInfo =
+      mockSupport.createMock(AmbariMetaInfo.class);
+
+    BaseBlueprintProcessor.stackInfo = mockMetaInfo;
+
+    ServiceInfo serviceInfo = new ServiceInfo();
+    serviceInfo.setName("HDFS");
+
+    StackServiceResponse stackServiceResponse =
+      new StackServiceResponse(serviceInfo);
+
+    ComponentInfo componentInfo = new ComponentInfo();
+    componentInfo.setName("SECONDARY_NAMENODE");
+    // simulate the stack requirement that there always
+    // be exactly one SECONDARY_NAMENODE per cluster
+    componentInfo.setCardinality("1");
+
+    StackServiceComponentResponse stackComponentResponse =
+      new StackServiceComponentResponse(componentInfo);
+
+    ComponentInfo componentInfoNameNode = new ComponentInfo();
+    componentInfoNameNode.setName("NAMENODE");
+    componentInfoNameNode.setCardinality("1-2");
+    StackServiceComponentResponse stackServiceComponentResponseTwo =
+      new StackServiceComponentResponse(componentInfoNameNode);
+
+    Set<StackServiceComponentResponse> responses =
+      new HashSet<StackServiceComponentResponse>();
+    responses.add(stackComponentResponse);
+    responses.add(stackServiceComponentResponseTwo);
+
+    expect(mockController.getStackServices(isA(Set.class))).andReturn(
+      Collections.singleton(stackServiceResponse));
+    expect(mockController.getStackComponents(isA(Set.class))).andReturn(
+      responses);
+    expect(mockController.getStackConfigurations(isA(Set.class))).andReturn(Collections.<StackConfigurationResponse>emptySet());
+    expect(mockController.getStackLevelConfigurations(isA(Set.class))).andReturn(Collections.<StackConfigurationResponse>emptySet());
+
+    expect(mockMetaInfo.getComponentDependencies("HDP", "2.0.6", "HDFS", "SECONDARY_NAMENODE")).andReturn(Collections.<DependencyInfo>emptyList());
+    expect(mockMetaInfo.getComponentDependencies("HDP", "2.0.6", "HDFS", "NAMENODE")).andReturn(Collections.<DependencyInfo>emptyList());
+
+
+    mockSupport.replayAll();
+
+    BaseBlueprintProcessor baseBlueprintProcessor =
+      new BaseBlueprintProcessor(Collections.<String>emptySet(), Collections.<Resource.Type, String>emptyMap(), mockController) {
+        @Override
+        protected Set<String> getPKPropertyIds() {
+          return null;
+        }
+
+        @Override
+        public RequestStatus createResources(Request request) throws SystemException, UnsupportedPropertyException, ResourceAlreadyExistsException, NoSuchParentResourceException {
+          return null;
+        }
+
+        @Override
+        public Set<Resource> getResources(Request request, Predicate predicate) throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
+          return null;
+        }
+
+        @Override
+        public RequestStatus updateResources(Request request, Predicate predicate) throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
+          return null;
+        }
+
+        @Override
+        public RequestStatus deleteResources(Predicate predicate) throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
+          return null;
+        }
+      };
+
+    HostGroupComponentEntity hostGroupComponentEntity =
+      new HostGroupComponentEntity();
+    // don't include the SECONDARY_NAMENODE in this entity
+    hostGroupComponentEntity.setName("NAMENODE");
+
+    HostGroupEntity hostGroupEntity =
+      new HostGroupEntity();
+    hostGroupEntity.setName("host-group-one");
+    hostGroupEntity.setComponents(Collections.singleton(hostGroupComponentEntity));
+    hostGroupEntity.setConfigurations(Collections.<HostGroupConfigEntity>emptyList());
+
+    // setup config entity to simulate the case of NameNode HA being enabled
+    BlueprintConfigEntity configEntity =
+      new BlueprintConfigEntity();
+    configEntity.setConfigData("{\"dfs.nameservices\":\"mycluster\",\"key4\":\"value4\"}");
+    configEntity.setType("hdfs-site");
+
+    BlueprintEntity testEntity =
+      new BlueprintEntity();
+    testEntity.setBlueprintName("test-blueprint");
+    testEntity.setStackName("HDP");
+    testEntity.setStackVersion("2.0.6");
+    testEntity.setHostGroups(Collections.singleton(hostGroupEntity));
+    testEntity.setConfigurations(Collections.singleton(configEntity));
+
+    baseBlueprintProcessor.validateTopology(testEntity);
+
+    mockSupport.verifyAll();
+  }
+
+  @Test
+  public void testValidationOverrideForSecondaryNameNodeWithoutHA() throws Exception {
+    EasyMockSupport mockSupport = new EasyMockSupport();
+
+    AmbariManagementController mockController =
+      mockSupport.createMock(AmbariManagementController.class);
+
+    AmbariMetaInfo mockMetaInfo =
+      mockSupport.createMock(AmbariMetaInfo.class);
+
+    BaseBlueprintProcessor.stackInfo = mockMetaInfo;
+
+    ServiceInfo serviceInfo = new ServiceInfo();
+    serviceInfo.setName("HDFS");
+
+    StackServiceResponse stackServiceResponse =
+      new StackServiceResponse(serviceInfo);
+
+    ComponentInfo componentInfo = new ComponentInfo();
+    componentInfo.setName("SECONDARY_NAMENODE");
+    // simulate the stack requirement that there always
+    // be exactly one SECONDARY_NAMENODE per cluster
+    componentInfo.setCardinality("1");
+
+    StackServiceComponentResponse stackComponentResponse =
+      new StackServiceComponentResponse(componentInfo);
+
+    ComponentInfo componentInfoNameNode = new ComponentInfo();
+    componentInfoNameNode.setName("NAMENODE");
+    componentInfoNameNode.setCardinality("1-2");
+    StackServiceComponentResponse stackServiceComponentResponseTwo =
+      new StackServiceComponentResponse(componentInfoNameNode);
+
+    Set<StackServiceComponentResponse> responses =
+      new HashSet<StackServiceComponentResponse>();
+    responses.add(stackComponentResponse);
+    responses.add(stackServiceComponentResponseTwo);
+
+    expect(mockController.getStackServices(isA(Set.class))).andReturn(
+      Collections.singleton(stackServiceResponse));
+    expect(mockController.getStackComponents(isA(Set.class))).andReturn(
+      responses);
+    expect(mockController.getStackConfigurations(isA(Set.class))).andReturn(Collections.<StackConfigurationResponse>emptySet());
+    expect(mockController.getStackLevelConfigurations(isA(Set.class))).andReturn(Collections.<StackConfigurationResponse>emptySet());
+
+    expect(mockMetaInfo.getComponentDependencies("HDP", "2.0.6", "HDFS", "SECONDARY_NAMENODE")).andReturn(Collections.<DependencyInfo>emptyList());
+    expect(mockMetaInfo.getComponentDependencies("HDP", "2.0.6", "HDFS", "NAMENODE")).andReturn(Collections.<DependencyInfo>emptyList());
+
+
+    mockSupport.replayAll();
+
+    BaseBlueprintProcessor baseBlueprintProcessor =
+      new BaseBlueprintProcessor(Collections.<String>emptySet(), Collections.<Resource.Type, String>emptyMap(), mockController) {
+        @Override
+        protected Set<String> getPKPropertyIds() {
+          return null;
+        }
+
+        @Override
+        public RequestStatus createResources(Request request) throws SystemException, UnsupportedPropertyException, ResourceAlreadyExistsException, NoSuchParentResourceException {
+          return null;
+        }
+
+        @Override
+        public Set<Resource> getResources(Request request, Predicate predicate) throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
+          return null;
+        }
+
+        @Override
+        public RequestStatus updateResources(Request request, Predicate predicate) throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
+          return null;
+        }
+
+        @Override
+        public RequestStatus deleteResources(Predicate predicate) throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
+          return null;
+        }
+      };
+
+    HostGroupComponentEntity hostGroupComponentEntity =
+      new HostGroupComponentEntity();
+    // don't include the SECONDARY_NAMENODE in this entity
+    hostGroupComponentEntity.setName("NAMENODE");
+
+    HostGroupEntity hostGroupEntity =
+      new HostGroupEntity();
+    hostGroupEntity.setName("host-group-one");
+    hostGroupEntity.setComponents(Collections.singleton(hostGroupComponentEntity));
+    hostGroupEntity.setConfigurations(Collections.<HostGroupConfigEntity>emptyList());
+
+
+
+    BlueprintEntity testEntity =
+      new BlueprintEntity();
+    testEntity.setBlueprintName("test-blueprint");
+    testEntity.setStackName("HDP");
+    testEntity.setStackVersion("2.0.6");
+    testEntity.setHostGroups(Collections.singleton(hostGroupEntity));
+    testEntity.setConfigurations(Collections.<BlueprintConfigEntity>emptyList());
+
+    try {
+      baseBlueprintProcessor.validateTopology(testEntity);
+      fail("IllegalArgumentException should have been thrown");
+    } catch (IllegalArgumentException expectedException) {
+      // expected exception
+    }
+
+    mockSupport.verifyAll();
+  }
+
   /**
    * Convenience class for easier setup/initialization of dependencies for unit
    * testing.

http://git-wip-us.apache.org/repos/asf/ambari/blob/4320de6e/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
index abc0b15..3796bbb 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@ -31,6 +31,7 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.LinkedHashMap;
 import java.util.Map;
 import java.util.Set;
 
@@ -1225,6 +1226,7 @@ public class BlueprintConfigurationProcessorTest {
   public void testDoUpdateForClusterWithNameNodeHAEnabled() throws Exception {
     final String expectedNameService = "mynameservice";
     final String expectedHostName = "c6401.apache.ambari.org";
+    final String expectedHostNameTwo = "serverTwo";
     final String expectedPortNum = "808080";
     final String expectedNodeOne = "nn1";
     final String expectedNodeTwo = "nn2";
@@ -1233,8 +1235,15 @@ public class BlueprintConfigurationProcessorTest {
     EasyMockSupport mockSupport = new EasyMockSupport();
 
     HostGroup mockHostGroupOne = mockSupport.createMock(HostGroup.class);
+    HostGroup mockHostGroupTwo = mockSupport.createMock(HostGroup.class);
 
-    expect(mockHostGroupOne.getHostInfo()).andReturn(Arrays.asList(expectedHostName, "serverTwo")).atLeastOnce();
+    Stack mockStack = mockSupport.createMock(Stack.class);
+
+    expect(mockHostGroupOne.getHostInfo()).andReturn(Arrays.asList(expectedHostName)).atLeastOnce();
+    expect(mockHostGroupTwo.getHostInfo()).andReturn(Arrays.asList(expectedHostNameTwo)).atLeastOnce();
+    expect(mockHostGroupOne.getComponents()).andReturn(Collections.singleton("NAMENODE")).atLeastOnce();
+    expect(mockHostGroupTwo.getComponents()).andReturn(Collections.singleton("NAMENODE")).atLeastOnce();
+    expect(mockStack.getCardinality("NAMENODE")).andReturn(new Cardinality("1-2")).atLeastOnce();
 
     mockSupport.replayAll();
 
@@ -1243,8 +1252,15 @@ public class BlueprintConfigurationProcessorTest {
 
     Map<String, String> hdfsSiteProperties =
       new HashMap<String, String>();
+    Map<String, String> hadoopEnvProperties =
+      new HashMap<String, String>();
+    Map<String, String> coreSiteProperties =
+      new HashMap<String, String>();
+
 
     configProperties.put("hdfs-site", hdfsSiteProperties);
+    configProperties.put("hadoop-env", hadoopEnvProperties);
+    configProperties.put("core-site", coreSiteProperties);
 
     // setup hdfs HA config for test
     hdfsSiteProperties.put("dfs.nameservices", expectedNameService);
@@ -1259,6 +1275,94 @@ public class BlueprintConfigurationProcessorTest {
     hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeOne, createExportedAddress(expectedPortNum, expectedHostGroupName));
     hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeTwo, createExportedAddress(expectedPortNum, expectedHostGroupName));
 
+    // configure the defaultFS to use the nameservice URL
+    coreSiteProperties.put("fs.defaultFS", "hdfs://" + expectedNameService);
+
+    BlueprintConfigurationProcessor configProcessor =
+      new BlueprintConfigurationProcessor(configProperties);
+
+    Map<String, HostGroup> mapOfHostGroups = new LinkedHashMap<String, HostGroup>();
+    mapOfHostGroups.put(expectedHostGroupName, mockHostGroupOne);
+    mapOfHostGroups.put("host-group-2", mockHostGroupTwo);
+
+    configProcessor.doUpdateForClusterCreate(mapOfHostGroups, mockStack);
+
+    // verify that the expected hostname was substituted for the host group name in the config
+    assertEquals("HTTPS address HA property not properly exported",
+      expectedHostName + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeOne));
+    assertEquals("HTTPS address HA property not properly exported",
+      expectedHostName + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeTwo));
+
+    assertEquals("HTTPS address HA property not properly exported",
+      expectedHostName + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeOne));
+    assertEquals("HTTPS address HA property not properly exported",
+      expectedHostName + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeTwo));
+
+    assertEquals("HTTPS address HA property not properly exported",
+      expectedHostName + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeOne));
+    assertEquals("HTTPS address HA property not properly exported",
+      expectedHostName + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeTwo));
+
+    // verify that the Blueprint config processor has set the internal required properties
+    // that determine the active and standby node hostnames for this HA setup
+    assertEquals("Active Namenode hostname was not set correctly",
+      expectedHostName, hadoopEnvProperties.get("dfs_ha_initial_namenode_active"));
+
+    assertEquals("Standby Namenode hostname was not set correctly",
+      expectedHostNameTwo, hadoopEnvProperties.get("dfs_ha_initial_namenode_standby"));
+
+    assertEquals("fs.defaultFS should not be modified by cluster update when NameNode HA is enabled.",
+                 "hdfs://" + expectedNameService, coreSiteProperties.get("fs.defaultFS"));
+
+    mockSupport.verifyAll();
+  }
+
+  @Test
+  public void testDoUpdateForClusterWithNameNodeHAEnabledAndActiveNodeSet() throws Exception {
+    final String expectedNameService = "mynameservice";
+    final String expectedHostName = "serverThree";
+    final String expectedHostNameTwo = "serverFour";
+    final String expectedPortNum = "808080";
+    final String expectedNodeOne = "nn1";
+    final String expectedNodeTwo = "nn2";
+    final String expectedHostGroupName = "host_group_1";
+
+    EasyMockSupport mockSupport = new EasyMockSupport();
+
+    HostGroup mockHostGroupOne = mockSupport.createMock(HostGroup.class);
+
+    expect(mockHostGroupOne.getHostInfo()).andReturn(Arrays.asList(expectedHostName, expectedHostNameTwo)).atLeastOnce();
+
+    mockSupport.replayAll();
+
+    Map<String, Map<String, String>> configProperties =
+      new HashMap<String, Map<String, String>>();
+
+    Map<String, String> hdfsSiteProperties =
+      new HashMap<String, String>();
+
+    Map<String, String> hadoopEnvProperties =
+      new HashMap<String, String>();
+
+    configProperties.put("hdfs-site", hdfsSiteProperties);
+    configProperties.put("hadoop-env", hadoopEnvProperties);
+
+    // setup hdfs HA config for test
+    hdfsSiteProperties.put("dfs.nameservices", expectedNameService);
+    hdfsSiteProperties.put("dfs.ha.namenodes.mynameservice", expectedNodeOne + ", " + expectedNodeTwo);
+
+    // setup properties that include exported host group information
+    hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeOne, createExportedAddress(expectedPortNum, expectedHostGroupName));
+    hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeTwo, createExportedAddress(expectedPortNum, expectedHostGroupName));
+    hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeOne, createExportedAddress(expectedPortNum, expectedHostGroupName));
+    hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeTwo, createExportedAddress(expectedPortNum, expectedHostGroupName));
+    hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeOne, createExportedAddress(expectedPortNum, expectedHostGroupName));
+    hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeTwo, createExportedAddress(expectedPortNum, expectedHostGroupName));
+
+    // set hadoop-env properties to explicitly configure the initial
+    // active and standby namenodes
+    hadoopEnvProperties.put("dfs_ha_initial_namenode_active", expectedHostName);
+    hadoopEnvProperties.put("dfs_ha_initial_namenode_standby", expectedHostNameTwo);
 
     BlueprintConfigurationProcessor configProcessor =
       new BlueprintConfigurationProcessor(configProperties);
@@ -1284,6 +1388,15 @@ public class BlueprintConfigurationProcessorTest {
     assertEquals("HTTPS address HA property not properly exported",
       expectedHostName + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeTwo));
 
+    // verify that the Blueprint config processor has not overridden
+    // the user's configuration to determine the active and
+    // standby nodes in this NameNode HA cluster
+    assertEquals("Active Namenode hostname was not set correctly",
+      expectedHostName, hadoopEnvProperties.get("dfs_ha_initial_namenode_active"));
+
+    assertEquals("Standby Namenode hostname was not set correctly",
+      expectedHostNameTwo, hadoopEnvProperties.get("dfs_ha_initial_namenode_standby"));
+
     mockSupport.verifyAll();
   }
 
@@ -1841,6 +1954,56 @@ public class BlueprintConfigurationProcessorTest {
   }
 
   @Test
+  public void testHDFSConfigClusterUpdateQuorumJournalURL() throws Exception {
+    final String expectedHostNameOne = "c6401.apache.ambari.org";
+    final String expectedHostNameTwo = "c6402.apache.ambari.org";
+    final String expectedPortNum = "808080";
+    final String expectedHostGroupName = "host_group_1";
+    final String expectedHostGroupNameTwo = "host_group_2";
+
+    EasyMockSupport mockSupport = new EasyMockSupport();
+
+    HostGroup mockHostGroupOne = mockSupport.createMock(HostGroup.class);
+    HostGroup mockHostGroupTwo = mockSupport.createMock(HostGroup.class);
+
+    expect(mockHostGroupOne.getHostInfo()).andReturn(Arrays.asList(expectedHostNameOne)).atLeastOnce();
+    expect(mockHostGroupTwo.getHostInfo()).andReturn(Arrays.asList(expectedHostNameTwo)).atLeastOnce();
+
+    mockSupport.replayAll();
+
+    Map<String, Map<String, String>> configProperties =
+      new HashMap<String, Map<String, String>>();
+
+    Map<String, String> hdfsSiteProperties =
+      new HashMap<String, String>();
+
+    configProperties.put("hdfs-site", hdfsSiteProperties);
+
+    // setup properties that include host information
+    // setup shared edit property, that includes a qjournal URL scheme
+    hdfsSiteProperties.put("dfs.namenode.shared.edits.dir", "qjournal://" + createExportedAddress(expectedPortNum, expectedHostGroupName) + ";" + createExportedAddress(expectedPortNum, expectedHostGroupNameTwo) + "/mycluster");
+
+    BlueprintConfigurationProcessor configProcessor =
+      new BlueprintConfigurationProcessor(configProperties);
+
+    Map<String, HostGroup> mapOfHostGroups =
+      new HashMap<String, HostGroup>();
+    mapOfHostGroups.put(expectedHostGroupName, mockHostGroupOne);
+    mapOfHostGroups.put(expectedHostGroupNameTwo, mockHostGroupTwo);
+
+    // call top-level export method
+    configProcessor.doUpdateForClusterCreate(mapOfHostGroups, null);
+
+    // expect that all servers are included in the updated config, and that the qjournal URL format is preserved
+    assertEquals("HDFS HA shared edits directory property not properly updated for cluster create.",
+      "qjournal://" + createHostAddress(expectedHostNameOne, expectedPortNum) + ";" + createHostAddress(expectedHostNameTwo, expectedPortNum) + "/mycluster",
+      hdfsSiteProperties.get("dfs.namenode.shared.edits.dir"));
+
+    mockSupport.verifyAll();
+
+  }
+
+  @Test
   public void testHiveConfigExported() throws Exception {
     final String expectedHostName = "c6401.apache.ambari.org";
     final String expectedHostNameTwo = "c6402.ambari.apache.org";

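The two HA tests above pin down the auto-assignment behaviour added to BlueprintConfigurationProcessor: when hadoop-env names neither initial NameNode, the processor derives active and standby from the host groups that carry the NAMENODE component (two groups with one host each, or a single group with two hosts), and it never overrides values the user supplied. A standalone Python sketch of that selection logic, under the same assumptions:

def assign_initial_namenodes(hadoop_env, namenode_host_groups):
    """namenode_host_groups: one host list per host group that runs NAMENODE.
    Only fills in the properties when the user has not set either of them."""
    if ("dfs_ha_initial_namenode_active" in hadoop_env or
            "dfs_ha_initial_namenode_standby" in hadoop_env):
        return hadoop_env  # respect explicit user configuration
    if len(namenode_host_groups) == 2:
        hadoop_env["dfs_ha_initial_namenode_active"] = namenode_host_groups[0][0]
        hadoop_env["dfs_ha_initial_namenode_standby"] = namenode_host_groups[1][0]
    elif len(namenode_host_groups) == 1:
        hosts = namenode_host_groups[0]
        hadoop_env["dfs_ha_initial_namenode_active"] = hosts[0]
        hadoop_env["dfs_ha_initial_namenode_standby"] = hosts[1]
    return hadoop_env

env = {}
assign_initial_namenodes(env, [["c6401.apache.ambari.org"], ["serverTwo"]])
print(env["dfs_ha_initial_namenode_active"], env["dfs_ha_initial_namenode_standby"])
# c6401.apache.ambari.org serverTwo
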
http://git-wip-us.apache.org/repos/asf/ambari/blob/4320de6e/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
index 8e7414f..78a2f72 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
@@ -435,6 +435,184 @@ class TestNamenode(RMFTestCase):
     )
     self.assertNoMoreResources()
 
+  # tests namenode start command when NameNode HA is enabled, and
+  # the HA cluster is started initially, rather than using the UI Wizard
+  def test_start_ha_bootstrap_active_from_blueprint(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
+                       classname = "NameNode",
+                       command = "start",
+                       config_file="ha_bootstrap_active_node.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+    self.assert_configure_default()
+
+    # verify that active namenode was formatted
+    self.assertResourceCalled('File', '/tmp/checkForFormat.sh',
+                              content = StaticFile('checkForFormat.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('Execute', '/tmp/checkForFormat.sh hdfs /etc/hadoop/conf /usr/bin /var/run/hadoop/hdfs/namenode/formatted/ /var/lib/hdfs/namenode/formatted/ /hadoop/hdfs/namenode',
+                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
+                              not_if = 'test -d /var/run/hadoop/hdfs/namenode/formatted/ || test -d /var/lib/hdfs/namenode/formatted/',
+                              )
+    self.assertResourceCalled('Directory', '/var/lib/hdfs/namenode/formatted/',
+                              recursive = True,
+                              )
+
+    self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
+                              owner = 'hdfs',
+                              content = Template('exclude_hosts_list.j2'),
+                              group = 'hadoop',
+                              )
+    self.assertResourceCalled('Directory', '/var/run/hadoop',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              mode = 0755
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid',
+                              action = ['delete'],
+                              not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
+                              )
+    self.assertResourceCalled('Execute', "/usr/bin/sudo su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited &&  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode'",
+                              environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
+                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
+                              )
+    self.assertResourceCalled('Execute', "hadoop dfsadmin -safemode get | grep 'Safe mode is OFF'",
+                              path = ['/usr/bin'],
+                              tries = 40,
+                              only_if = "/usr/bin/sudo su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
+                              user = 'hdfs',
+                              try_sleep = 10,
+                              )
+    self.assertResourceCalled('HdfsDirectory', '/tmp',
+                              security_enabled = False,
+                              keytab = UnknownConfigurationMock(),
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              mode = 0777,
+                              owner = 'hdfs',
+                              bin_dir = '/usr/bin',
+                              action = ['create_delayed'],
+                              )
+    self.assertResourceCalled('HdfsDirectory', '/user/ambari-qa',
+                              security_enabled = False,
+                              keytab = UnknownConfigurationMock(),
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              mode = 0770,
+                              owner = 'ambari-qa',
+                              bin_dir = '/usr/bin',
+                              action = ['create_delayed'],
+                              )
+    self.assertResourceCalled('HdfsDirectory', None,
+                              security_enabled = False,
+                              keytab = UnknownConfigurationMock(),
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              action = ['create'],
+                              bin_dir = '/usr/bin',
+                              only_if = "/usr/bin/sudo su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
+                              )
+    self.assertNoMoreResources()
+
+  # tests namenode start command when NameNode HA is enabled, and
+  # the HA cluster is started initially, rather than using the UI Wizard
+  # this test verifies the startup of a "standby" namenode
+  def test_start_ha_bootstrap_standby_from_blueprint(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
+                       classname = "NameNode",
+                       command = "start",
+                       config_file="ha_bootstrap_standby_node.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+    self.assert_configure_default()
+
+    self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
+                              owner = 'hdfs',
+                              content = Template('exclude_hosts_list.j2'),
+                              group = 'hadoop',
+                              )
+    self.assertResourceCalled('Directory', '/var/run/hadoop',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              mode = 0755
+    )
+
+    # verify that the standby case is detected, and that the bootstrap
+    # command is run before the namenode launches
+    self.assertResourceCalled('Execute', 'hdfs namenode -bootstrapStandby',
+                              user = 'hdfs', tries=50)
+
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid',
+                              action = ['delete'],
+                              not_if='ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
+                              )
+    self.assertResourceCalled('Execute', "/usr/bin/sudo su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited &&  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode'",
+                              environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
+                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
+                              )
+    self.assertResourceCalled('Execute', "hadoop dfsadmin -safemode get | grep 'Safe mode is OFF'",
+                              path = ['/usr/bin'],
+                              tries = 40,
+                              only_if = "/usr/bin/sudo su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn2 | grep active'",
+                              user = 'hdfs',
+                              try_sleep = 10,
+                              )
+    self.assertResourceCalled('HdfsDirectory', '/tmp',
+                              security_enabled = False,
+                              keytab = UnknownConfigurationMock(),
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              mode = 0777,
+                              owner = 'hdfs',
+                              bin_dir = '/usr/bin',
+                              action = ['create_delayed'],
+                              )
+    self.assertResourceCalled('HdfsDirectory', '/user/ambari-qa',
+                              security_enabled = False,
+                              keytab = UnknownConfigurationMock(),
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              mode = 0770,
+                              owner = 'ambari-qa',
+                              bin_dir = '/usr/bin',
+                              action = ['create_delayed'],
+                              )
+    self.assertResourceCalled('HdfsDirectory', None,
+                              security_enabled = False,
+                              keytab = UnknownConfigurationMock(),
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              action = ['create'],
+                              bin_dir = '/usr/bin',
+                              only_if = "/usr/bin/sudo su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn2 | grep active'",
+                              )
+    self.assertNoMoreResources()
+
   def test_decommission_default(self):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
                        classname = "NameNode",

http://git-wip-us.apache.org/repos/asf/ambari/blob/4320de6e/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_zkfc.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_zkfc.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_zkfc.py
index 29ee04b..9fe9d03 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_zkfc.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_zkfc.py
@@ -205,3 +205,136 @@ class TestZkfc(RMFTestCase):
                               action = ['delete'],
                               )
     self.assertNoMoreResources()
+
+
+  def test_start_with_ha_active_namenode_bootstrap(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zkfc_slave.py",
+                       classname = "ZkfcSlave",
+                       command = "start",
+                       config_file="ha_bootstrap_active_node.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+    self.assertResourceCalled('Directory', '/etc/security/limits.d',
+                              owner = 'root',
+                              group = 'root',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('File', '/etc/security/limits.d/hdfs.conf',
+                              content = Template('hdfs.conf.j2'),
+                              owner = 'root',
+                              group = 'root',
+                              mode = 0644,
+                              )
+    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              conf_dir = '/etc/hadoop/conf',
+                              configurations = self.getConfig()['configurations']['hdfs-site'],
+                              configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
+    )
+    self.assertResourceCalled('XmlConfig', 'core-site.xml',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              conf_dir = '/etc/hadoop/conf',
+                              configurations = self.getConfig()['configurations']['core-site'],
+                              configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
+                              mode = 0644
+    )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/slaves',
+                              content = Template('slaves.j2'),
+                              owner = 'hdfs',
+                              )
+    self.assertResourceCalled('Directory', '/var/run/hadoop',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              mode = 0755
+    )
+
+    # verify that the znode initialization occurs prior to ZKFC startup
+    self.assertResourceCalled('Execute', 'hdfs zkfc -formatZK -force -nonInteractive',
+                              user = 'hdfs')
+
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid',
+                              action = ['delete'],
+                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid` >/dev/null 2>&1',
+                              )
+    self.assertResourceCalled('Execute', "/usr/bin/sudo su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited &&  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start zkfc'",
+                              environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
+                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid` >/dev/null 2>&1',
+                              )
+    self.assertNoMoreResources()
+
+  def test_start_with_ha_standby_namenode_bootstrap(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zkfc_slave.py",
+                       classname = "ZkfcSlave",
+                       command = "start",
+                       config_file="ha_bootstrap_standby_node.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+    self.assertResourceCalled('Directory', '/etc/security/limits.d',
+                              owner = 'root',
+                              group = 'root',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('File', '/etc/security/limits.d/hdfs.conf',
+                              content = Template('hdfs.conf.j2'),
+                              owner = 'root',
+                              group = 'root',
+                              mode = 0644,
+                              )
+    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              conf_dir = '/etc/hadoop/conf',
+                              configurations = self.getConfig()['configurations']['hdfs-site'],
+                              configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
+    )
+    self.assertResourceCalled('XmlConfig', 'core-site.xml',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              conf_dir = '/etc/hadoop/conf',
+                              configurations = self.getConfig()['configurations']['core-site'],
+                              configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
+                              mode = 0644
+    )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/slaves',
+                              content = Template('slaves.j2'),
+                              owner = 'hdfs',
+                              )
+    self.assertResourceCalled('Directory', '/var/run/hadoop',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              mode = 0755
+    )
+
+    # verify that the znode initialization occurs prior to ZKFC startup
+    self.assertResourceCalled('Execute', 'hdfs zkfc -formatZK -force -nonInteractive',
+                              user = 'hdfs')
+
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid',
+                              action = ['delete'],
+                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid` >/dev/null 2>&1',
+                              )
+    self.assertResourceCalled('Execute', "/usr/bin/sudo su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited &&  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start zkfc'",
+                              environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
+                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid >/dev/null 2>&1 && ps -p `cat /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid` >/dev/null 2>&1',
+                              )
+    self.assertNoMoreResources()

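A note for readers following the assertions above: the point both tests pin down is ordering. The znode is initialized ("hdfs zkfc -formatZK -force -nonInteractive", run as the hdfs user) before hadoop-daemon.sh is invoked to start the zkfc process, and both the stale pid-file cleanup and the daemon start are guarded by the same pid-file/liveness check. The snippet below is a minimal, stdlib-only sketch of that ordering; the function names, the plain "su -" invocation, and the choice to always format on a bootstrap node are illustrative assumptions, not the actual zkfc_slave.py implementation.

# Minimal sketch (Python stdlib only) of the start ordering verified above:
# 1) initialize the HA znode, 2) only then start the ZKFC daemon, guarded by a pid check.
# Paths, user handling and "always format on bootstrap" are illustrative assumptions.
import os
import subprocess

PID_FILE = "/var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid"

def zkfc_running():
    """Return True if the pid file exists and the recorded process is alive."""
    try:
        with open(PID_FILE) as f:
            pid = int(f.read().strip())
        os.kill(pid, 0)          # signal 0 only checks that the process exists
        return True
    except (IOError, ValueError, OSError):
        return False

def start_zkfc(bootstrap_namenode=True):
    if bootstrap_namenode:
        # znode initialization must happen before the daemon is started
        subprocess.check_call(
            ["su", "-", "hdfs", "-c", "hdfs zkfc -formatZK -force -nonInteractive"])
    if not zkfc_running():
        if os.path.exists(PID_FILE):
            os.remove(PID_FILE)  # remove a stale pid file from a previous run
        subprocess.check_call(
            ["su", "-", "hdfs", "-c",
             "/usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start zkfc"])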

[2/3] ambari git commit: AMBARI-8771. Add support for deploying HDFS NameNode HA Clusters with Blueprints. (rnettleton)

http://git-wip-us.apache.org/repos/asf/ambari/blob/4320de6e/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_active_node.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_active_node.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_active_node.json
new file mode 100644
index 0000000..b3e61bc
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_active_node.json
@@ -0,0 +1,618 @@
+{
+    "roleCommand": "START", 
+    "clusterName": "cl1", 
+    "hostname": "c6401.ambari.apache.org", 
+    "passiveInfo": [], 
+    "hostLevelParams": {
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
+        "ambari_db_rca_password": "mapred", 
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
+        "jce_name": "UnlimitedJCEPolicyJDK7.zip", 
+        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
+        "repo_info": "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0.6\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]", 
+        "package_list": "[{\"type\":\"rpm\",\"name\":\"hadoop-yarn\"},{\"type\":\"rpm\",\"name\":\"hadoop-yarn-nodemanager\"},{\"type\":\"rpm\",\"name\":\"hadoop-mapreduce\"},{\"type\":\"rpm\",\"name\":\"hadoop-yarn-proxyserver\"},{\"type\":\"rpm\",\"name\":\"hadoop-yarn-resourcemanager\"}]", 
+        "stack_version": "2.0",
+        "stack_name": "HDP", 
+        "db_name": "ambari", 
+        "ambari_db_rca_driver": "org.postgresql.Driver", 
+        "jdk_name": "jdk-7u67-linux-x64.tar.gz",
+        "ambari_db_rca_username": "mapred", 
+        "java_home": "/usr/jdk64/jdk1.7.0_45", 
+        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar"
+    }, 
+    "commandType": "EXECUTION_COMMAND", 
+    "roleParams": {}, 
+    "serviceName": "YARN", 
+    "role": "NODEMANAGER", 
+    "commandParams": {
+        "service_package_folder": "HDP/2.0.6/services/YARN/package", 
+        "script": "scripts/nodemanager.py", 
+        "hooks_folder": "HDP/2.0.6/hooks",
+        "command_timeout": "600", 
+        "script_type": "PYTHON"
+    }, 
+    "taskId": 93, 
+    "public_hostname": "c6401.ambari.apache.org", 
+    "configurations": {
+        "mapred-site": {
+            "mapreduce.jobhistory.address": "c6402.ambari.apache.org:10020", 
+            "mapreduce.cluster.administrators": " hadoop", 
+            "mapreduce.reduce.input.buffer.percent": "0.0", 
+            "mapreduce.output.fileoutputformat.compress": "false", 
+            "mapreduce.framework.name": "yarn", 
+            "mapreduce.map.speculative": "false", 
+            "mapreduce.reduce.shuffle.merge.percent": "0.66", 
+            "yarn.app.mapreduce.am.resource.mb": "683", 
+            "mapreduce.map.java.opts": "-Xmx273m", 
+            "mapreduce.application.classpath": "$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*", 
+            "mapreduce.job.reduce.slowstart.completedmaps": "0.05", 
+            "mapreduce.output.fileoutputformat.compress.type": "BLOCK", 
+            "mapreduce.reduce.speculative": "false", 
+            "mapreduce.reduce.java.opts": "-Xmx546m", 
+            "mapreduce.am.max-attempts": "2", 
+            "yarn.app.mapreduce.am.admin-command-opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN", 
+            "mapreduce.reduce.log.level": "INFO", 
+            "mapreduce.map.sort.spill.percent": "0.7", 
+            "mapreduce.task.timeout": "300000", 
+            "mapreduce.map.memory.mb": "341", 
+            "mapreduce.task.io.sort.factor": "100", 
+            "mapreduce.jobhistory.intermediate-done-dir": "/mr-history/tmp", 
+            "mapreduce.reduce.memory.mb": "683", 
+            "yarn.app.mapreduce.am.log.level": "INFO", 
+            "mapreduce.map.log.level": "INFO", 
+            "mapreduce.shuffle.port": "13562", 
+            "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &amp;&gt; /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`", 
+            "mapreduce.map.output.compress": "false", 
+            "yarn.app.mapreduce.am.staging-dir": "/user", 
+            "mapreduce.reduce.shuffle.parallelcopies": "30", 
+            "mapreduce.reduce.shuffle.input.buffer.percent": "0.7", 
+            "mapreduce.jobhistory.webapp.address": "c6402.ambari.apache.org:19888", 
+            "mapreduce.jobhistory.done-dir": "/mr-history/done", 
+            "mapreduce.admin.reduce.child.java.opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN", 
+            "mapreduce.task.io.sort.mb": "136", 
+            "yarn.app.mapreduce.am.command-opts": "-Xmx546m", 
+            "mapreduce.admin.map.child.java.opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN"
+        }, 
+        "global": {
+            "security_enabled": "false", 
+            "proxyuser_group": "users", 
+            "zk_user": "zookeeper", 
+            "falcon_user": "falcon", 
+            "syncLimit": "5", 
+            "yarn_user": "yarn", 
+            "zk_log_dir": "/var/log/zookeeper", 
+            "yarn_heapsize": "1024", 
+            "rca_enabled": "false", 
+            "namenode_heapsize": "1024m", 
+            "oozie_user": "oozie", 
+            "hcat_conf_dir": "", 
+            "ganglia_runtime_dir": "/var/run/ganglia/hdp", 
+            "lzo_enabled": "true", 
+            "namenode_opt_maxnewsize": "200m", 
+            "smokeuser": "ambari-qa", 
+            "hive_user": "hive", 
+            "hdfs_log_dir_prefix": "/var/log/hadoop", 
+            "mapred_pid_dir_prefix": "/var/run/hadoop-mapreduce", 
+            "zk_data_dir": "/hadoop/zookeeper", 
+            "yarn_pid_dir_prefix": "/var/run/hadoop-yarn", 
+            "initLimit": "10", 
+            "zk_pid_dir": "/var/run/zookeeper", 
+            "user_group": "hadoop", 
+            "dtnode_heapsize": "1024m", 
+            "gmond_user": "nobody", 
+            "tickTime": "2000", 
+            "storm_user": "storm", 
+            "clientPort": "2181", 
+            "yarn_log_dir_prefix": "/var/log/hadoop-yarn", 
+            "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce",  
+            "hdfs_user": "hdfs", 
+            "hbase_user": "hbase", 
+            "webhcat_user": "hcat", 
+            "nodemanager_heapsize": "1024", 
+            "gmetad_user": "nobody", 
+            "namenode_opt_newsize": "200m",
+            "namenode_opt_permsize" : "128m",
+            "namenode_opt_maxpermsize" : "256m", 
+            "mapred_user": "mapred", 
+            "resourcemanager_heapsize": "1024", 
+            "hcat_user": "hcat", 
+            "hadoop_heapsize": "1024", 
+            "hadoop_pid_dir_prefix": "/var/run/hadoop"
+        }, 
+        "capacity-scheduler": {
+            "yarn.scheduler.capacity.node-locality-delay": "40", 
+            "yarn.scheduler.capacity.root.capacity": "100", 
+            "yarn.scheduler.capacity.root.acl_administer_queues": "*", 
+            "yarn.scheduler.capacity.root.queues": "default", 
+            "yarn.scheduler.capacity.maximum-applications": "10000", 
+            "yarn.scheduler.capacity.root.default.user-limit-factor": "1",
+            "yarn.scheduler.capacity.root.default.maximum-capacity": "100", 
+            "yarn.scheduler.capacity.root.default.state": "RUNNING", 
+            "yarn.scheduler.capacity.maximum-am-resource-percent": "0.2", 
+            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "*", 
+            "yarn.scheduler.capacity.root.default.capacity": "100", 
+            "yarn.scheduler.capacity.root.default.acl_submit_jobs": "*"
+        }, 
+        "hdfs-site": {
+            "dfs.namenode.avoid.write.stale.datanode": "true", 
+            "dfs.namenode.shared.edits.dir": "qjournal://c6401.ambari.apache.org:8485;c6402.ambari.apache.org:8485;c6403.ambari.apache.org:8485/ns1", 
+            "dfs.namenode.rpc-address.ns1.nn1": "c6401.ambari.apache.org:8020", 
+            "dfs.namenode.http-address.ns1.nn2": "c6402.ambari.apache.org:50070", 
+            "dfs.namenode.http-address.ns1.nn1": "c6401.ambari.apache.org:50070", 
+            "dfs.namenode.checkpoint.txns": "1000000", 
+            "dfs.namenode.rpc-address.ns1.nn2": "c6402.ambari.apache.org:8020", 
+            "dfs.block.access.token.enable": "true", 
+            "dfs.support.append": "true", 
+            "dfs.datanode.address": "0.0.0.0:${ambari.dfs.datanode.port}", 
+            "dfs.cluster.administrators": " hdfs", 
+            "ambari.dfs.datanode.http.port": "50075", 
+            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
+            "dfs.namenode.safemode.threshold-pct": "1.0f", 
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
+            "dfs.permissions.enabled": "true", 
+            "dfs.client.read.shortcircuit": "true", 
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
+            "dfs.ha.automatic-failover.enabled": "true", 
+            "dfs.blockreport.initialDelay": "120", 
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journal", 
+            "dfs.blocksize": "134217728", 
+            "dfs.datanode.max.transfer.threads": "1024", 
+            "dfs.heartbeat.interval": "3", 
+            "dfs.replication": "3", 
+            "dfs.namenode.handler.count": "100", 
+            "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary", 
+            "fs.permissions.umask-mode": "022", 
+            "dfs.namenode.stale.datanode.interval": "30000", 
+            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
+            "dfs.namenode.name.dir": "/hadoop/hdfs/namenode", 
+            "dfs.nameservices": "ns1", 
+            "dfs.datanode.data.dir": "/hadoop/hdfs/data", 
+            "dfs.namenode.https-address.ns1.nn2": "c6402.ambari.apache.org:50470", 
+            "dfs.webhdfs.enabled": "true", 
+            "dfs.namenode.https-address.ns1.nn1": "c6401.ambari.apache.org:50470", 
+            "dfs.client.failover.proxy.provider.ns1": "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider", 
+            "dfs.datanode.failed.volumes.tolerated": "0", 
+            "dfs.namenode.accesstime.precision": "0", 
+            "ambari.dfs.datanode.port": "50010", 
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
+            "dfs.namenode.secondary.http-address": "c6402.ambari.apache.org:50090", 
+            "dfs.ha.fencing.methods": "shell(/bin/true)", 
+            "dfs.datanode.http.address": "0.0.0.0:50075",
+            "dfs.datanode.du.reserved": "1073741824", 
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
+            "dfs.ha.namenodes.ns1": "nn1,nn2", 
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
+            "dfs.permissions.superusergroup": "hdfs", 
+            "dfs.https.port": "50470", 
+            "dfs.journalnode.http-address": "0.0.0.0:8480", 
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
+            "dfs.namenode.avoid.read.stale.datanode": "true", 
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
+            "dfs.datanode.data.dir.perm": "750", 
+            "dfs.namenode.name.dir.restore": "true", 
+            "dfs.replication.max": "50", 
+            "dfs.namenode.checkpoint.period": "21600"
+        }, 
+        "yarn-log4j": {
+            "log4j.appender.JSA.layout.ConversionPattern": "%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n", 
+            "log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary": "${yarn.server.resourcemanager.appsummary.logger}", 
+            "log4j.appender.RMSUMMARY.File": "/var/log/hadoop-yarn/yarn/${yarn.server.resourcemanager.appsummary.log.file}", 
+            "log4j.appender.RMSUMMARY.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.appender.RMSUMMARY.layout.ConversionPattern": "%d{ISO8601} %p %c{2}: %m%n", 
+            "hadoop.mapreduce.jobsummary.log.file": "hadoop-mapreduce.jobsummary.log", 
+            "log4j.appender.RMSUMMARY.MaxBackupIndex": "20", 
+            "log4j.appender.RMSUMMARY": "org.apache.log4j.RollingFileAppender", 
+            "log4j.appender.JSA": "org.apache.log4j.DailyRollingFileAppender", 
+            "hadoop.mapreduce.jobsummary.logger": "${hadoop.root.logger}", 
+            "yarn.server.resourcemanager.appsummary.log.file": "hadoop-mapreduce.jobsummary.log", 
+            "log4j.appender.JSA.DatePattern": ".yyyy-MM-dd", 
+            "yarn.server.resourcemanager.appsummary.logger": "${hadoop.root.logger}", 
+            "log4j.appender.JSA.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.appender.RMSUMMARY.MaxFileSize": "256MB", 
+            "log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary": "false"
+        }, 
+        "core-site": {
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
+            "gluster.daemon.user": "null", 
+            "fs.trash.interval": "360", 
+            "hadoop.security.authentication": "simple", 
+            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec", 
+            "mapreduce.jobtracker.webinterface.trusted": "false", 
+            "fs.AbstractFileSystem.glusterfs.impl": "null", 
+            "fs.defaultFS": "hdfs://ns1", 
+            "ipc.client.connect.max.retries": "50", 
+            "ipc.client.idlethreshold": "8000", 
+            "io.file.buffer.size": "131072", 
+            "hadoop.security.authorization": "false", 
+            "ha.zookeeper.quorum": "c6401.ambari.apache.org:2181,c6402.ambari.apache.org:2181,c6403.ambari.apache.org:2181", 
+            "ipc.client.connection.maxidletime": "30000", 
+            "hadoop.security.auth_to_local": "\n        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n        RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n        RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n        RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n        DEFAULT"
+        }, 
+        "hdfs-log4j": {
+            "log4j.appender.DRFA.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.appender.DRFA.layout.ConversionPattern": "%d{ISO8601} %p %c: %m%n", 
+            "log4j.appender.DRFAAUDIT.DatePattern": ".yyyy-MM-dd", 
+            "log4j.appender.EventCounter": "org.apache.hadoop.log.metrics.EventCounter", 
+            "log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit": "false", 
+            "log4j.appender.DRFAS.File": "${hadoop.log.dir}/${hadoop.security.log.file}", 
+            "log4j.appender.NullAppender": "org.apache.log4j.varia.NullAppender", 
+            "log4j.appender.MRAUDIT.layout.ConversionPattern": "%d{ISO8601} %p %c{2}: %m%n", 
+            "log4j.additivity.org.apache.hadoop.mapred.AuditLogger": "false", 
+            "log4j.appender.DRFAS": "org.apache.log4j.DailyRollingFileAppender", 
+            "hadoop.tasklog.noKeepSplits": "4", 
+            "log4j.appender.DRFAAUDIT": "org.apache.log4j.DailyRollingFileAppender", 
+            "log4j.appender.DRFAAUDIT.File": "${hadoop.log.dir}/hdfs-audit.log", 
+            "log4j.appender.DRFAS.DatePattern": ".yyyy-MM-dd", 
+            "log4j.appender.MRAUDIT": "org.apache.log4j.DailyRollingFileAppender", 
+            "hadoop.security.log.maxbackupindex": "20", 
+            "log4j.appender.DRFA.DatePattern": ".yyyy-MM-dd", 
+            "log4j.appender.console.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service": "ERROR", 
+            "log4j.appender.RFA.layout": "org.apache.log4j.PatternLayout", 
+            "hadoop.tasklog.taskid": "null", 
+            "log4j.appender.RFAS.layout": "org.apache.log4j.PatternLayout", 
+            "hadoop.root.logger": "INFO,console", 
+            "hadoop.security.logger": "INFO,console", 
+            "log4j.appender.DRFAAUDIT.layout.ConversionPattern": "%d{ISO8601} %p %c{2}: %m%n", 
+            "log4j.appender.RFAS.MaxFileSize": "${hadoop.security.log.maxfilesize}", 
+            "log4j.appender.MRAUDIT.DatePattern": ".yyyy-MM-dd", 
+            "log4j.appender.RFA.File": "${hadoop.log.dir}/${hadoop.log.file}", 
+            "log4j.appender.RFAS.layout.ConversionPattern": "%d{ISO8601} %p %c: %m%n", 
+            "log4j.appender.TLA": "org.apache.hadoop.mapred.TaskLogAppender", 
+            "log4j.logger.org.apache.hadoop.metrics2": "${hadoop.metrics.log.level}", 
+            "log4j.appender.DRFA.File": "${hadoop.log.dir}/${hadoop.log.file}", 
+            "log4j.appender.TLA.layout": "org.apache.log4j.PatternLayout", 
+            "hadoop.log.file": "hadoop.log", 
+            "hadoop.security.log.file": "SecurityAuth.audit", 
+            "log4j.appender.console.target": "System.err", 
+            "log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit": "${hdfs.audit.logger}", 
+            "hdfs.audit.logger": "INFO,console", 
+            "log4j.appender.RFAS.MaxBackupIndex": "${hadoop.security.log.maxbackupindex}", 
+            "log4j.appender.TLA.layout.ConversionPattern": "%d{ISO8601} %p %c: %m%n", 
+            "hadoop.tasklog.iscleanup": "false", 
+            "mapred.audit.logger": "INFO,console", 
+            "log4j.appender.DRFAAUDIT.layout": "org.apache.log4j.PatternLayout", 
+            "hadoop.tasklog.logsRetainHours": "12", 
+            "log4j.appender.MRAUDIT.File": "${hadoop.log.dir}/mapred-audit.log", 
+            "log4j.appender.TLA.totalLogFileSize": "${hadoop.tasklog.totalLogFileSize}", 
+            "log4j.appender.DRFA": "org.apache.log4j.DailyRollingFileAppender", 
+            "log4j.category.SecurityLogger": "${hadoop.security.logger}", 
+            "hadoop.tasklog.totalLogFileSize": "100", 
+            "log4j.appender.RFA.MaxFileSize": "256MB", 
+            "log4j.appender.RFAS": "org.apache.log4j.RollingFileAppender", 
+            "log4j.appender.RFA": "org.apache.log4j.RollingFileAppender", 
+            "log4j.appender.RFA.layout.ConversionPattern": "%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n", 
+            "log4j.appender.DRFAS.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.threshhold": "ALL", 
+            "log4j.appender.TLA.isCleanup": "${hadoop.tasklog.iscleanup}", 
+            "log4j.appender.TLA.taskId": "${hadoop.tasklog.taskid}", 
+            "log4j.appender.console.layout.ConversionPattern": "%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n", 
+            "log4j.appender.MRAUDIT.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.appender.console": "org.apache.log4j.ConsoleAppender", 
+            "hadoop.log.dir": ".", 
+            "hadoop.security.log.maxfilesize": "256MB", 
+            "hadoop.metrics.log.level": "INFO", 
+            "log4j.appender.RFA.MaxBackupIndex": "10", 
+            "log4j.rootLogger": "${hadoop.root.logger}, EventCounter", 
+            "log4j.appender.RFAS.File": "${hadoop.log.dir}/${hadoop.security.log.file}", 
+            "log4j.logger.org.apache.hadoop.mapred.AuditLogger": "${mapred.audit.logger}", 
+            "hadoop.tasklog.purgeLogSplits": "true", 
+            "log4j.appender.DRFAS.layout.ConversionPattern": "%d{ISO8601} %p %c: %m%n"
+        }, 
+        "zookeeper-log4j": {
+            "log4j.appender.CONSOLE.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.appender.TRACEFILE.layout.ConversionPattern": "%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n", 
+            "log4j.appender.CONSOLE.layout.ConversionPattern": "%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n", 
+            "log4j.appender.ROLLINGFILE": "org.apache.log4j.RollingFileAppender", 
+            "log4j.appender.CONSOLE.Threshold": "INFO", 
+            "log4j.appender.CONSOLE": "org.apache.log4j.ConsoleAppender", 
+            "log4j.appender.ROLLINGFILE.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.appender.TRACEFILE.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.appender.TRACEFILE.Threshold": "TRACE", 
+            "log4j.appender.ROLLINGFILE.layout.ConversionPattern": "%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n", 
+            "log4j.appender.TRACEFILE": "org.apache.log4j.FileAppender", 
+            "log4j.appender.TRACEFILE.File": "zookeeper_trace.log", 
+            "log4j.appender.ROLLINGFILE.File": "zookeeper.log", 
+            "log4j.appender.ROLLINGFILE.MaxFileSize": "10MB", 
+            "log4j.appender.ROLLINGFILE.Threshold": "DEBUG"
+        }, 
+        "yarn-site": {
+            "yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25", 
+            "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor", 
+            "yarn.nodemanager.local-dirs": "/hadoop/yarn/local", 
+            "yarn.resourcemanager.resource-tracker.address": "c6402.ambari.apache.org:8025", 
+            "yarn.nodemanager.remote-app-log-dir-suffix": "logs", 
+            "yarn.resourcemanager.hostname": "c6402.ambari.apache.org", 
+            "yarn.nodemanager.health-checker.script.timeout-ms": "60000", 
+            "yarn.resourcemanager.scheduler.class": "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler", 
+            "yarn.nodemanager.resource.memory-mb": "2048", 
+            "yarn.scheduler.minimum-allocation-mb": "683", 
+            "yarn.resourcemanager.address": "c6402.ambari.apache.org:8050", 
+            "yarn.resourcemanager.scheduler.address": "c6402.ambari.apache.org:8030", 
+            "yarn.log-aggregation.retain-seconds": "2592000", 
+            "yarn.scheduler.maximum-allocation-mb": "2048", 
+            "yarn.log-aggregation-enable": "true", 
+            "yarn.nodemanager.address": "0.0.0.0:45454", 
+            "yarn.nodemanager.container-monitor.interval-ms": "3000", 
+            "yarn.nodemanager.log-aggregation.compression-type": "gz", 
+            "yarn.nodemanager.log.retain-second": "604800", 
+            "yarn.nodemanager.delete.debug-delay-sec": "0", 
+            "yarn.nodemanager.log-dirs": "/hadoop/yarn/log", 
+            "yarn.nodemanager.health-checker.interval-ms": "135000", 
+            "yarn.resourcemanager.am.max-attempts": "2", 
+            "yarn.nodemanager.remote-app-log-dir": "/app-logs", 
+            "yarn.nodemanager.admin-env": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX", 
+            "yarn.nodemanager.aux-services": "mapreduce_shuffle", 
+            "yarn.nodemanager.vmem-check-enabled": "false", 
+            "yarn.nodemanager.vmem-pmem-ratio": "2.1", 
+            "yarn.admin.acl": "*", 
+            "yarn.resourcemanager.webapp.address": "c6402.ambari.apache.org:8088", 
+            "yarn.resourcemanager.nodes.exclude-path": "/etc/hadoop/conf/yarn.exclude", 
+            "yarn.nodemanager.linux-container-executor.group": "hadoop", 
+            "yarn.acl.enable": "true", 
+            "yarn.log.server.url": "http://c6402.ambari.apache.org:19888/jobhistory/logs", 
+            "yarn.application.classpath": "/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*", 
+            "yarn.resourcemanager.admin.address": "c6402.ambari.apache.org:8141", 
+            "yarn.nodemanager.aux-services.mapreduce_shuffle.class": "org.apache.hadoop.mapred.ShuffleHandler"
+        },
+        "yarn-env": {
+            "yarn_pid_dir_prefix": "/var/run/hadoop-yarn", 
+            "apptimelineserver_heapsize": "1024", 
+            "nodemanager_heapsize": "1024", 
+            "content": "\nexport HADOOP_YARN_HOME={{hadoop_yarn_home}}\nexport YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER\nexport YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\nexport JAVA_HOME={{java64_home}}\n\n# User for YARN daemons\nexport HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}\n\n# resolve links - $0 may be a softlink\nexport YARN_CONF_DIR=\"${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}\"\n\n# some Java parameters\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\nif [ \"$JAVA_HOME\" != \"\" ]; then\n  #echo \"run java in $JAVA_HOME\"\n  JAVA_HOME=$JAVA_HOME\nfi\n\nif [ \"$JAVA_HOME\" = \"\" ]; then\n  echo \"Error: JAVA_HOME is not set.\"\n  exit 1\nfi\n\nJAVA=$JAVA_HOME/bin/java\nJAVA_HEAP_MAX=-Xmx1000m\n\n# For setting YARN specific HEAP sizes please use this\n# Parameter and set appropriately\nYARN_HEAPSIZE={{yarn_heapsize}}\n\n# check envvars which might override default args\nif [ \"$YARN_HEAPSIZE\" != \"\" ]; then\n  JAVA_HEAP_M
 AX=\"-Xmx\"\"$YARN_HEAPSIZE\"\"m\"\nfi\n\n# Resource Manager specific parameters\n\n# Specify the max Heapsize for the ResourceManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_RESOURCEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}\n\n# Specify the JVM options to be used when starting the ResourceManager.\n# These options will be appended to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_RESOURCEMANAGER_OPTS=\n\n# Node Manager specific parameters\n\n# Specify the max Heapsize for the NodeManager using a numerical value\n# in the scale of MB. For example, to 
 specify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_NODEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}\n\n# Specify the max Heapsize for the HistoryManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1024.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_HISTORYSERVER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}\n\n# Specify the JVM options to be used when starting the NodeManager.\n# These options will be
  appended to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_NODEMANAGER_OPTS=\n\n# so that filenames w/ spaces are handled correctly in loops below\nIFS=\n\n\n# default log directory and file\nif [ \"$YARN_LOG_DIR\" = \"\" ]; then\n  YARN_LOG_DIR=\"$HADOOP_YARN_HOME/logs\"\nfi\nif [ \"$YARN_LOGFILE\" = \"\" ]; then\n  YARN_LOGFILE='yarn.log'\nfi\n\n# default policy file for service-level authorization\nif [ \"$YARN_POLICYFILE\" = \"\" ]; then\n  YARN_POLICYFILE=\"hadoop-policy.xml\"\nfi\n\n# restore ordinary behaviour\nunset IFS\n\n\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.root.logger=$
 {YARN_ROOT_LOGGER:-INFO,console}\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}\"\nif [ \"x$JAVA_LIBRARY_PATH\" != \"x\" ]; then\n  YARN_OPTS=\"$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH\"\nfi\nYARN_OPTS=\"$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE\"", 
+            "yarn_heapsize": "1024", 
+            "yarn_user": "yarn", 
+            "resourcemanager_heapsize": "1024", 
+            "yarn_log_dir_prefix": "/var/log/hadoop-yarn",
+            "min_user_id": "1000"
+        },
+        "cluster-env": {
+            "security_enabled": "false",
+            "ignore_groupsusers_create": "false",
+            "smokeuser": "ambari-qa",
+            "kerberos_domain": "EXAMPLE.COM",
+            "user_group": "hadoop"
+        },
+        "hadoop-env": {
+            "namenode_opt_maxnewsize": "200m",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
+            "namenode_heapsize": "1024m", 
+            "namenode_opt_newsize": "200m",
+            "namenode_opt_permsize" : "128m",
+            "namenode_opt_maxpermsize" : "256m",
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop Configuration Directory\n#TODO: if env var set that can cause problems\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by defaul
 t.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}
 }/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/
 $USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HAD
 OOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n#
  The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ -d \"/usr/lib/tez\" ]; then\n  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64", 
+            "hdfs_user": "hdfs",
+            "dtnode_heapsize": "1024m", 
+            "proxyuser_group": "users",
+            "hadoop_heapsize": "1024", 
+            "hadoop_pid_dir_prefix": "/var/run/hadoop",
+            "dfs_ha_initial_namenode_active" : "c6401.ambari.apache.org",
+            "dfs_ha_initial_namenode_standby" : "c6402.ambari.apache.org"
+        },
+        "hive-env": {
+            "hcat_pid_dir": "/var/run/webhcat", 
+            "hcat_user": "hcat", 
+            "hive_ambari_database": "MySQL", 
+            "hive_hostname": "abtest-3.c.pramod-thangali.internal", 
+            "hive_metastore_port": "9083", 
+            "webhcat_user": "hcat", 
+            "content": "\n if [ \"$SERVICE\" = \"cli\" ]; then\n   if [ -z \"$DEBUG\" ]; then\n     export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit\"\n   else\n     export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit\"\n   fi\n fi\n\n# The heap size of the jvm stared by hive shell script can be controlled via:\n\nexport HADOOP_HEAPSIZE=\"{{hive_heapsize}}\"\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n\n# Larger heap size may be required when running queries over large number of files or partitions.\n# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be\n# appropriate for hive server (hwi etc).\n\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hive Configuration Director
 y can be controlled by:\nexport HIVE_CONF_DIR={{conf_dir}}\n\n# Folder containing extra ibraries required for hive compilation/execution can be controlled by:\nif [ \"${HIVE_AUX_JARS_PATH}\" != \"\" ]; then\n  export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\nelif [ -d \"/usr/lib/hive-hcatalog/\" ]; then\n  export HIVE_AUX_JARS_PATH=/usr/lib/hive-hcatalog/share/hcatalog/hive-hcatalog-core-*.jar\nelse\n  export HIVE_AUX_JARS_PATH=/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar\nfi\nexport METASTORE_PORT={{hive_metastore_port}}", 
+            "hive_database_name": "hive", 
+            "hive_database_type": "mysql", 
+            "hive_pid_dir": "/var/run/hive", 
+            "hive_log_dir": "/var/log/hive", 
+            "hive_user": "hive", 
+            "hcat_log_dir": "/var/log/webhcat", 
+            "hive_database": "New MySQL Database"
+        },
+        "hbase-env": {
+            "hbase_pid_dir": "/var/run/hbase", 
+            "hbase_user": "hbase", 
+            "hbase_master_heapsize": "1024m", 
+            "content": "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n# The maximum amount of heap to use, in MB. Default is 1000.\n# export HBASE_HEAPSIZE=1000\n\n# Extra Java runtime options.\n# Below are what we set by default. May only work with SUN JVM.\n# For more on why as well as other possible settings,\n# see http://wiki.apache.org/hadoop/PerformanceTuning\nexport HBASE_OPTS=\"-XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log\"\nexport SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintG
 CDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable JMX exporting\n# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\nexport HBASE_MASTER_OPTS=\"-Xmx{{master_heapsize}}\"\nexport HBASE_REGIONSERVER_OPTS=\"-Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# 
 Extra ssh options. Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by default.\nexport HBASE_LOG_DIR={{log_dir}}\n\n# A string representing this instance of hbase. $USER by default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The directory where pid files are stored. /tmp by default.\nexport HBASE_PID_DIR={{pid_dir}}\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}}\"\nexport HBASE_MASTER_OPTS=\
 "$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}}\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n{% endif %}", 
+            "hbase_regionserver_heapsize": "1024m",
+            "hbase_regionserver_xmn_max": "512",
+            "hbase_regionserver_xmn_ratio": "0.2",
+            "hbase_log_dir": "/var/log/hbase"
+        },
+        "ganglia-env": {
+            "gmond_user": "nobody", 
+            "ganglia_runtime_dir": "/var/run/ganglia/hdp", 
+            "rrdcached_base_dir": "/var/lib/ganglia/rrds", 
+            "rrdcached_flush_timeout": "7200", 
+            "gmetad_user": "nobody", 
+            "rrdcached_write_threads": "4", 
+            "rrdcached_delay": "1800", 
+            "rrdcached_timeout": "3600"
+        },
+        "zookeeper-env": {
+            "clientPort": "2181", 
+            "zk_user": "zookeeper", 
+            "zk_log_dir": "/var/log/zookeeper", 
+            "syncLimit": "5", 
+            "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}", 
+            "initLimit": "10", 
+            "zk_pid_dir": "/var/run/zookeeper", 
+            "zk_data_dir": "/hadoop/zookeeper", 
+            "tickTime": "2000"
+        },
+        "mapred-env": {
+            "content": "\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\n\nexport HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}\n\nexport HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA\n\n#export HADOOP_JOB_HISTORYSERVER_OPTS=\n#export HADOOP_MAPRED_LOG_DIR=\"\" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.\n#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.\n#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.\n#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default\n#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.", 
+            "mapred_pid_dir_prefix": "/var/run/hadoop-mapreduce", 
+            "mapred_user": "mapred", 
+            "jobhistory_heapsize": "900", 
+            "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce"
+        },
+        "tez-env": {
+            "content": "\n# Tez specific configuration\nexport TEZ_CONF_DIR={{config_dir}}\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}", 
+            "tez_user": "tez"
+        }, 
+        "storm-env": {
+            "content": "\n#!/bin/bash\n\n# Set Storm specific environment variables here.\n\n# The java implementation to use.\nexport JAVA_HOME={{java_home}}\n\n# export STORM_CONF_DIR=\"\"", 
+            "storm_log_dir": "/var/log/storm", 
+            "storm_pid_dir": "/var/run/storm", 
+            "storm_user": "storm"
+        }, 
+        "falcon-env": {
+            "falcon_port": "15000", 
+            "falcon_pid_dir": "/var/run/falcon", 
+            "falcon_log_dir": "/var/log/falcon", 
+            "falcon.emeddedmq.port": "61616", 
+            "falcon_user": "falcon", 
+            "falcon_local_dir": "/hadoop/falcon", 
+            "content": "\n# The java implementation to use. If JAVA_HOME is not found we expect java and jar to be in path\nexport JAVA_HOME={{java_home}}\n\n# any additional java opts you want to set. This will apply to both client and server operations\n#export FALCON_OPTS=\n\n# any additional java opts that you want to set for client only\n#export FALCON_CLIENT_OPTS=\n\n# java heap size we want to set for the client. Default is 1024MB\n#export FALCON_CLIENT_HEAP=\n\n# any additional opts you want to set for prisim service.\n#export FALCON_PRISM_OPTS=\n\n# java heap size we want to set for the prisim service. Default is 1024MB\n#export FALCON_PRISM_HEAP=\n\n# any additional opts you want to set for falcon service.\nexport FALCON_SERVER_OPTS=\"-Dfalcon.embeddedmq={{falcon_embeddedmq_enabled}} -Dfalcon.emeddedmq.port={{falcon_emeddedmq_port}}\"\n\n# java heap size we want to set for the falcon server. Default is 1024MB\n#export FALCON_SERVER_HEAP=\n\n# What is is considered as falco
 n home dir. Default is the base locaion of the installed software\n#export FALCON_HOME_DIR=\n\n# Where log files are stored. Defatult is logs directory under the base install location\nexport FALCON_LOG_DIR={{falcon_log_dir}}\n\n# Where pid files are stored. Defatult is logs directory under the base install location\nexport FALCON_PID_DIR={{falcon_pid_dir}}\n\n# where the falcon active mq data is stored. Defatult is logs/data directory under the base install location\nexport FALCON_DATA_DIR={{falcon_embeddedmq_data}}\n\n# Where do you want to expand the war file. By Default it is in /server/webapp dir under the base install dir.\n#export FALCON_EXPANDED_WEBAPP_DIR=", 
+            "falcon.embeddedmq.data": "/hadoop/falcon/embeddedmq/data", 
+            "falcon.embeddedmq": "true", 
+            "falcon_store_uri": "file:///hadoop/falcon/store"
+        }, 
+        "oozie-env": {
+            "oozie_derby_database": "Derby", 
+            "oozie_admin_port": "11001", 
+            "oozie_hostname": "abtest-3.c.pramod-thangali.internal", 
+            "oozie_pid_dir": "/var/run/oozie", 
+            "content": "\n#!/bin/bash\n\nif [ -d \"/usr/lib/bigtop-tomcat\" ]; then\n  export OOZIE_CONFIG=${OOZIE_CONFIG:-/etc/oozie/conf}\n  export CATALINA_BASE=${CATALINA_BASE:-/var/lib/oozie/oozie-server}\n  export CATALINA_TMPDIR=${CATALINA_TMPDIR:-/var/tmp/oozie}\n  export OOZIE_CATALINA_HOME=/usr/lib/bigtop-tomcat\nfi\n\n#Set JAVA HOME\nexport JAVA_HOME={{java_home}}\n\nexport JRE_HOME=${JAVA_HOME}\n\n# Set Oozie specific environment variables here.\n\n# Settings for the Embedded Tomcat that runs Oozie\n# Java System properties for Oozie should be specified in this variable\n#\n# export CATALINA_OPTS=\n\n# Oozie configuration file to load from Oozie configuration directory\n#\n# export OOZIE_CONFIG_FILE=oozie-site.xml\n\n# Oozie logs directory\n#\nexport OOZIE_LOG={{oozie_log_dir}}\n\n# Oozie pid directory\n#\nexport CATALINA_PID={{pid_file}}\n\n#Location of the data for oozie\nexport OOZIE_DATA={{oozie_data_dir}}\n\n# Oozie Log4J configuration file to load from Oozie config
 uration directory\n#\n# export OOZIE_LOG4J_FILE=oozie-log4j.properties\n\n# Reload interval of the Log4J configuration file, in seconds\n#\n# export OOZIE_LOG4J_RELOAD=10\n\n# The port Oozie server runs\n#\nexport OOZIE_HTTP_PORT={{oozie_server_port}}\n\n# The admin port Oozie server runs\n#\nexport OOZIE_ADMIN_PORT={{oozie_server_admin_port}}\n\n# The host name Oozie server runs on\n#\n# export OOZIE_HTTP_HOSTNAME=`hostname -f`\n\n# The base URL for callback URLs to Oozie\n#\n# export OOZIE_BASE_URL=\"http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie\"\nexport JAVA_LIBRARY_PATH=/usr/lib/hadoop/lib/native/Linux-amd64-64", 
+            "oozie_user": "oozie", 
+            "oozie_database": "New Derby Database", 
+            "oozie_data_dir": "/hadoop/oozie/data", 
+            "oozie_log_dir": "/var/log/oozie"
+        }, 
+        "webhcat-env": {
+            "content": "\n# The file containing the running pid\nPID_FILE={{pid_file}}\n\nTEMPLETON_LOG_DIR={{templeton_log_dir}}/\n\n\nWEBHCAT_LOG_DIR={{templeton_log_dir}}/\n\n# The console error log\nERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log\n\n# The console log\nCONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log\n\n#TEMPLETON_JAR=templeton_jar_name\n\n#HADOOP_PREFIX=hadoop_prefix\n\n#HCAT_PREFIX=hive_prefix\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nexport HADOOP_HOME=/usr/lib/hadoop"
+        }, 
+        "pig-env": {
+            "content": "\nJAVA_HOME={{java64_home}}\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\nif [ -d \"/usr/lib/tez\" ]; then\n  PIG_OPTS=\"$PIG_OPTS -Dmapreduce.framework.name=yarn\"\nfi"
+        }, 
+        "sqoop-env": {
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n#Set path to where bin/hadoop is available\n#Set path to where bin/hadoop is available\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n#set the path to where bin/hbase is available\nexport HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}\n\n#Set the path to where bin/hive is available\nexport HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}\n\n#Set the path for where zookeper config dir is\nexport ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}\n\n# add libthrift in hive to sqoop class path first so hive imports work\nexport SQOOP_USER_CLASSPATH=\"`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}\"",
+            "sqoop_user": "sqoop"
+        }
+    },
+    "configuration_attributes": {
+      "mapred-site": {
+        "final": {
+          "mapred.healthChecker.script.path": "true",
+          "mapreduce.jobtracker.staging.root.dir": "true"
+        }
+      },
+      "oozie-site": {
+        "final": {
+          "oozie.service.PurgeService.purge.interval": "true",
+          "oozie.service.CallableQueueService.queue.size": "true"
+        }
+      },
+      "webhcat-site": {
+        "final": {
+          "templeton.pig.path": "true",
+          "templeton.exec.timeout": "true",
+          "templeton.override.enabled": "true"
+        }
+      },
+      "hdfs-site": {
+        "final": {
+          "dfs.web.ugi": "true",
+          "dfs.support.append": "true",
+          "dfs.cluster.administrators": "true"
+        }
+      },
+      "hbase-site": {
+        "final": {
+          "hbase.client.keyvalue.maxsize": "true",
+          "hbase.hstore.compactionThreshold": "true",
+          "hbase.rootdir": "true"
+        }
+      },
+      "core-site": {
+        "final": {
+          "hadoop.proxyuser.hive.groups": "true",
+          "webinterface.private.actions": "true",
+          "hadoop.proxyuser.oozie.hosts": "true"
+        }
+      },
+      "hive-site": {
+        "final": {
+          "javax.jdo.option.ConnectionPassword": "true",
+          "javax.jdo.option.ConnectionDriverName": "true",
+          "hive.optimize.bucketmapjoin.sortedmerge": "true"
+        }
+      }
+    },
+    "configurationTags": {
+        "capacity-scheduler": {
+            "tag": "version1"
+        }, 
+        "global": {
+            "tag": "version1"
+        }, 
+        "mapred-site": {
+            "tag": "version1"
+        }, 
+        "hdfs-site": {
+            "tag": "version1392401108182"
+        }, 
+        "yarn-log4j": {
+            "tag": "version1"
+        }, 
+        "core-site": {
+            "tag": "version1392401108196"
+        }, 
+        "hdfs-log4j": {
+            "tag": "version1"
+        }, 
+        "zookeeper-log4j": {
+            "tag": "version1"
+        }, 
+        "yarn-site": {
+            "tag": "version1"
+        }
+    }, 
+    "commandId": "23-4", 
+    "clusterHostInfo": {
+        "ambari_server_host": [
+            "c6401.ambari.apache.org"
+        ],
+        "nm_hosts": [
+            "c6403.ambari.apache.org", 
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "zkfc_hosts": [
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "all_ping_ports": [
+            "8670", 
+            "8670", 
+            "8670"
+        ], 
+        "journalnode_hosts": [
+            "c6403.ambari.apache.org", 
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "rm_host": [
+            "c6402.ambari.apache.org"
+        ], 
+        "all_hosts": [
+            "c6403.ambari.apache.org", 
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "slave_hosts": [
+            "c6403.ambari.apache.org", 
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "namenode_host": [
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "zookeeper_hosts": [
+            "c6403.ambari.apache.org", 
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "hs_host": [
+            "c6402.ambari.apache.org"
+        ]
+    }
+}
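The hadoop-env block in this fixture carries "dfs_ha_initial_namenode_active" and "dfs_ha_initial_namenode_standby", and the command's "hostname" field identifies the node the command runs on; comparing the two is enough to decide whether the local node should bootstrap as the initial active or the initial standby NameNode, which is what distinguishes the ha_bootstrap_active_node.json and ha_bootstrap_standby_node.json fixtures. The sketch below is an illustrative, stdlib-only lookup against a command JSON of this shape; the function name is hypothetical and this is not the Ambari params module.

# Illustrative sketch: derive the HA bootstrap role of the local node from a
# command JSON such as ha_bootstrap_active_node.json above. The function name is
# hypothetical; real Ambari scripts consume these values through their params module.
import json

def ha_bootstrap_role(command_json_path):
    with open(command_json_path) as f:
        cmd = json.load(f)
    hadoop_env = cmd["configurations"]["hadoop-env"]
    hostname = cmd["hostname"]
    if hostname == hadoop_env.get("dfs_ha_initial_namenode_active"):
        return "active"    # this host should format/bootstrap as the active NameNode
    if hostname == hadoop_env.get("dfs_ha_initial_namenode_standby"):
        return "standby"   # this host should bootstrapStandby against the active
    return "none"          # HA bootstrap hints absent or host not listed

if __name__ == "__main__":
    # For the fixture above (hostname c6401 == dfs_ha_initial_namenode_active)
    # this prints "active"; the standby fixture would print "standby".
    print(ha_bootstrap_role("ha_bootstrap_active_node.json"))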