Posted to commits@ambari.apache.org by yu...@apache.org on 2014/11/14 03:19:46 UTC

[04/29] ambari git commit: AMBARI-8269. Merge branch-windows-dev changes to trunk. (Jayush Luniya via yusaku)

http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json.rej
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json.rej b/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json.rej
new file mode 100644
index 0000000..93981b5
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json.rej
@@ -0,0 +1,527 @@
+***************
+*** 1,262 ****
+  {
+      "configuration_attributes": {
+-         "storm-site": {}, 
+          "hdfs-site": {
+              "final": {
+-                 "dfs.support.append": "true", 
+                  "dfs.namenode.http-address": "true"
+              }
+-         }, 
+-         "storm-env": {}, 
+          "core-site": {
+              "final": {
+                  "fs.defaultFS": "true"
+              }
+-         }, 
+-         "hadoop-policy": {}, 
+-         "hdfs-log4j": {}, 
+-         "hadoop-env": {}, 
+-         "zookeeper-env": {}, 
+-         "zookeeper-log4j": {}, 
+          "cluster-env": {}
+-     }, 
+      "commandParams": {
+-         "command_timeout": "600", 
+-         "script": "scripts/nimbus.py", 
+-         "script_type": "PYTHON", 
+-         "service_package_folder": "HDP/2.1/services/STORM/package", 
+          "hooks_folder": "HDP/2.0.6/hooks"
+-     }, 
+-     "roleCommand": "START", 
+-     "clusterName": "pacan", 
+-     "hostname": "c6402.ambari.apache.org", 
+      "hostLevelParams": {
+-         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
+-         "ambari_db_rca_password": "mapred", 
+-         "java_home": "/usr/jdk64/jdk1.7.0_45", 
+-         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
+-         "jce_name": "UnlimitedJCEPolicyJDK7.zip", 
+-         "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
+-         "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.2\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\"},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.20\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\"}]", 
+-         "group_list": "[\"hadoop\",\"users\"]", 
+-         "package_list": "[{\"name\":\"storm_2_2_0_0_*\"}]", 
+-         "stack_version": "2.2", 
+-         "stack_name": "HDP", 
+-         "db_name": "ambari", 
+-         "ambari_db_rca_driver": "org.postgresql.Driver", 
+-         "jdk_name": "jdk-7u45-linux-x64.tar.gz", 
+-         "ambari_db_rca_username": "mapred", 
+-         "db_driver_filename": "mysql-connector-java.jar", 
+-         "user_list": "[\"ambari-qa\",\"zookeeper\",\"hdfs\",\"storm\"]", 
+          "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar"
+-     }, 
+-     "commandType": "EXECUTION_COMMAND", 
+-     "roleParams": {}, 
+-     "serviceName": "STORM", 
+-     "role": "NIMBUS", 
+-     "forceRefreshConfigTags": [], 
+-     "taskId": 54, 
+-     "public_hostname": "c6402.ambari.apache.org", 
+      "configurations": {
+          "storm-site": {
+-             "topology.tuple.serializer": "backtype.storm.serialization.types.ListDelegateSerializer", 
+-             "topology.workers": "1", 
+-             "drpc.worker.threads": "64", 
+-             "storm.zookeeper.servers": "['c6402.ambari.apache.org']", 
+-             "supervisor.heartbeat.frequency.secs": "5", 
+-             "topology.executor.send.buffer.size": "1024", 
+-             "drpc.childopts": "-Xmx768m", 
+-             "nimbus.thrift.port": "6627", 
+-             "storm.zookeeper.retry.intervalceiling.millis": "30000", 
+-             "storm.local.dir": "/hadoop/storm", 
+-             "topology.receiver.buffer.size": "8", 
+-             "storm.messaging.netty.client_worker_threads": "1", 
+-             "transactional.zookeeper.root": "/transactional", 
+-             "topology.skip.missing.kryo.registrations": "false", 
+-             "worker.heartbeat.frequency.secs": "1", 
+-             "zmq.hwm": "0", 
+-             "storm.zookeeper.connection.timeout": "15000", 
+-             "_storm.thrift.secure.transport": "SECURED_TRANSPORT_CLASS", 
+-             "storm.messaging.netty.server_worker_threads": "1", 
+-             "supervisor.worker.start.timeout.secs": "120", 
+-             "zmq.threads": "1", 
+-             "topology.acker.executors": "null", 
+-             "storm.local.mode.zmq": "false", 
+-             "topology.max.task.parallelism": "null", 
+-             "topology.max.error.report.per.interval": "5", 
+-             "storm.zookeeper.port": "2181", 
+-             "drpc.queue.size": "128", 
+-             "worker.childopts": "-Xmx768m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Worker_%ID%_JVM", 
+-             "nimbus.childopts": "-Xmx1024m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8649,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM", 
+-             "storm.zookeeper.retry.times": "5", 
+-             "nimbus.monitor.freq.secs": "10", 
+-             "storm.cluster.mode": "distributed", 
+-             "dev.zookeeper.path": "/tmp/dev-storm-zookeeper", 
+-             "drpc.invocations.port": "3773", 
+-             "_storm.thrift.nonsecure.transport": "NON_SECURED_TRANSPORT_CLASS", 
+-             "storm.zookeeper.root": "/storm", 
+-             "logviewer.childopts": "-Xmx128m", 
+-             "transactional.zookeeper.port": "null", 
+-             "topology.worker.childopts": "null", 
+-             "topology.max.spout.pending": "null", 
+-             "nimbus.cleanup.inbox.freq.secs": "600", 
+-             "storm.messaging.netty.min_wait_ms": "100", 
+-             "nimbus.task.timeout.secs": "30", 
+-             "nimbus.thrift.max_buffer_size": "1048576", 
+-             "topology.sleep.spout.wait.strategy.time.ms": "1", 
+-             "topology.optimize": "true", 
+-             "nimbus.reassign": "true", 
+-             "storm.messaging.transport": "backtype.storm.messaging.netty.Context", 
+-             "logviewer.appender.name": "A1", 
+-             "nimbus.host": "c6402.ambari.apache.org", 
+-             "ui.port": "8744", 
+-             "supervisor.slots.ports": "[6700, 6701]", 
+-             "nimbus.file.copy.expiration.secs": "600", 
+-             "supervisor.monitor.frequency.secs": "3", 
+-             "ui.childopts": "-Xmx768m _JAAS_PLACEHOLDER", 
+-             "transactional.zookeeper.servers": "null", 
+-             "zmq.linger.millis": "5000", 
+-             "topology.error.throttle.interval.secs": "10", 
+-             "topology.worker.shared.thread.pool.size": "4", 
+-             "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib:/usr/hdp/current/storm/lib", 
+-             "topology.spout.wait.strategy": "backtype.storm.spout.SleepSpoutWaitStrategy", 
+-             "task.heartbeat.frequency.secs": "3", 
+-             "topology.transfer.buffer.size": "1024", 
+-             "storm.zookeeper.session.timeout": "20000", 
+-             "topology.executor.receive.buffer.size": "1024", 
+-             "topology.stats.sample.rate": "0.05", 
+-             "topology.fall.back.on.java.serialization": "true", 
+-             "supervisor.childopts": "-Xmx256m _JAAS_PLACEHOLDER -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port=56431 -javaagent:/usr/hdp/current/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Supervisor_JVM", 
+-             "topology.enable.message.timeouts": "true", 
+-             "storm.messaging.netty.max_wait_ms": "1000", 
+-             "nimbus.topology.validator": "backtype.storm.nimbus.DefaultTopologyValidator", 
+-             "nimbus.supervisor.timeout.secs": "60", 
+-             "topology.disruptor.wait.strategy": "com.lmax.disruptor.BlockingWaitStrategy", 
+-             "nimbus.inbox.jar.expiration.secs": "3600", 
+-             "drpc.port": "3772", 
+-             "topology.kryo.factory": "backtype.storm.serialization.DefaultKryoFactory", 
+-             "storm.zookeeper.retry.interval": "1000", 
+-             "storm.messaging.netty.max_retries": "30", 
+-             "topology.tick.tuple.freq.secs": "null", 
+-             "drpc.request.timeout.secs": "600", 
+-             "nimbus.task.launch.secs": "120", 
+-             "task.refresh.poll.secs": "10", 
+-             "topology.message.timeout.secs": "30", 
+-             "storm.messaging.netty.buffer_size": "5242880", 
+-             "topology.state.synchronization.timeout.secs": "60", 
+-             "supervisor.worker.timeout.secs": "30", 
+-             "topology.trident.batch.emit.interval.millis": "500", 
+-             "topology.builtin.metrics.bucket.size.secs": "60", 
+-             "logviewer.port": "8000", 
+              "topology.debug": "false"
+-         }, 
+          "hdfs-site": {
+-             "dfs.namenode.avoid.write.stale.datanode": "true", 
+-             "dfs.namenode.kerberos.internal.spnego.principal": "${dfs.web.authentication.kerberos.principal}", 
+-             "dfs.namenode.checkpoint.txns": "1000000", 
+-             "dfs.block.access.token.enable": "true", 
+-             "dfs.support.append": "true", 
+-             "dfs.datanode.address": "0.0.0.0:1019", 
+-             "dfs.cluster.administrators": " hdfs", 
+-             "dfs.journalnode.kerberos.principal": "jn/_HOST@EXAMPLE.COM", 
+-             "dfs.datanode.balance.bandwidthPerSec": "6250000", 
+-             "dfs.namenode.safemode.threshold-pct": "1.0f", 
+-             "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
+-             "dfs.permissions.enabled": "true", 
+-             "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
+-             "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary", 
+-             "dfs.https.port": "50470", 
+-             "dfs.namenode.https-address": "c6402.ambari.apache.org:50470", 
+-             "dfs.secondary.namenode.kerberos.https.principal": "HTTP/_HOST@EXAMPLE.COM", 
+-             "dfs.blockreport.initialDelay": "120", 
+-             "dfs.journalnode.edits.dir": "/grid/0/hdfs/journal", 
+-             "dfs.blocksize": "134217728", 
+-             "dfs.client.read.shortcircuit": "true", 
+-             "dfs.datanode.max.transfer.threads": "1024", 
+-             "dfs.heartbeat.interval": "3", 
+-             "dfs.replication": "3", 
+-             "dfs.namenode.handler.count": "40", 
+-             "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
+-             "fs.permissions.umask-mode": "022", 
+-             "dfs.namenode.stale.datanode.interval": "30000", 
+-             "dfs.datanode.ipc.address": "0.0.0.0:8010", 
+-             "dfs.namenode.name.dir": "/hadoop/hdfs/namenode", 
+-             "dfs.secondary.namenode.kerberos.internal.spnego.principal": "${dfs.web.authentication.kerberos.principal}", 
+-             "dfs.datanode.data.dir": "/hadoop/hdfs/data", 
+-             "dfs.namenode.http-address": "c6402.ambari.apache.org:50070", 
+-             "dfs.webhdfs.enabled": "true", 
+-             "dfs.datanode.failed.volumes.tolerated": "0", 
+-             "dfs.namenode.accesstime.precision": "0", 
+-             "dfs.namenode.avoid.read.stale.datanode": "true", 
+-             "dfs.namenode.secondary.http-address": "c6402.ambari.apache.org:50090", 
+-             "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM", 
+-             "dfs.journalnode.keytab.file": "/etc/security/keytabs/jn.service.keytab", 
+-             "dfs.datanode.http.address": "0.0.0.0:1022", 
+-             "dfs.datanode.du.reserved": "1073741824", 
+-             "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
+-             "dfs.namenode.kerberos.https.principal": "HTTP/_HOST@EXAMPLE.COM", 
+-             "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
+-             "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
+-             "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
+-             "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab", 
+-             "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
+-             "dfs.permissions.superusergroup": "hdfs", 
+-             "dfs.journalnode.http-address": "0.0.0.0:8480", 
+-             "dfs.journalnode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", 
+-             "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
+-             "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
+-             "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
+-             "dfs.datanode.data.dir.perm": "750", 
+-             "dfs.namenode.name.dir.restore": "true", 
+-             "dfs.replication.max": "50", 
+              "dfs.namenode.checkpoint.period": "21600",
+              "dfs.http.policy": "HTTP_ONLY"
+-         }, 
+          "storm-env": {
+-             "storm_log_dir": "/var/log/storm", 
+-             "storm_principal_name": "storm@EXAMPLE.COM", 
+-             "storm_pid_dir": "/var/run/storm", 
+-             "storm_user": "storm", 
+-             "content": "\n#!/bin/bash\n\n# Set Storm specific environment variables here.\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}\n\n# export STORM_CONF_DIR=\"\"", 
+-             "nimbus_principal_name": "nimbus/_HOST@EXAMPLE.COM", 
+-             "strom_ui_keytab": "/etc/security/keytabs/http.storm.service.keytab", 
+-             "nimbus_keytab": "/etc/security/keytabs/nimbus.service.keytab", 
+-             "storm_keytab": "/etc/security/keytabs/storm.service.keytab", 
+              "strom_ui_principal_name": "HTTP/_HOST"
+-         }, 
+          "core-site": {
+-             "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
+-             "fs.trash.interval": "360", 
+-             "hadoop.security.authentication": "kerberos", 
+-             "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec", 
+-             "hadoop.proxyuser.falcon.hosts": "*", 
+-             "mapreduce.jobtracker.webinterface.trusted": "false", 
+-             "hadoop.security.authorization": "true", 
+-             "fs.defaultFS": "hdfs://c6402.ambari.apache.org:8020", 
+-             "ipc.server.tcpnodelay": "true", 
+-             "ipc.client.connect.max.retries": "50", 
+-             "ipc.client.idlethreshold": "8000", 
+-             "io.file.buffer.size": "131072", 
+-             "hadoop.security.auth_to_local": "RULE:[2:$1@$0](rm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](dn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](jn/_HOST@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](falcon@.*EXAMPLE.COM)s/.*/falcon/\nDEFAULT", 
+-             "ipc.client.connection.maxidletime": "30000", 
+              "hadoop.proxyuser.falcon.groups": "users"
+-         }, 
+          "hadoop-policy": {
+-             "security.job.client.protocol.acl": "*", 
+-             "security.job.task.protocol.acl": "*", 
+-             "security.datanode.protocol.acl": "*", 
+-             "security.namenode.protocol.acl": "*", 
+-             "security.client.datanode.protocol.acl": "*", 
+-             "security.inter.tracker.protocol.acl": "*", 
+-             "security.refresh.usertogroups.mappings.protocol.acl": "hadoop", 
+-             "security.client.protocol.acl": "*", 
+-             "security.refresh.policy.protocol.acl": "hadoop", 
+-             "security.admin.operations.protocol.acl": "hadoop", 
+              "security.inter.datanode.protocol.acl": "*"
+-         }, 
+          "hdfs-log4j": {
+              "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.logger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logger}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# mapred audit logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN"
+-         }, 
+          "hadoop-env": {
+              "namenode_opt_maxnewsize": "200m", 
+              "hdfs_log_dir_prefix": "/var/log/hadoop", 
+--- 1,262 ----
+  {
+      "configuration_attributes": {
++         "storm-site": {},
+          "hdfs-site": {
+              "final": {
++                 "dfs.support.append": "true",
+                  "dfs.namenode.http-address": "true"
+              }
++         },
++         "storm-env": {},
+          "core-site": {
+              "final": {
+                  "fs.defaultFS": "true"
+              }
++         },
++         "hadoop-policy": {},
++         "hdfs-log4j": {},
++         "hadoop-env": {},
++         "zookeeper-env": {},
++         "zookeeper-log4j": {},
+          "cluster-env": {}
++     },
+      "commandParams": {
++         "command_timeout": "600",
++         "script": "scripts/nimbus.py",
++         "script_type": "PYTHON",
++         "service_package_folder": "HDP/2.1/services/STORM/package",
+          "hooks_folder": "HDP/2.0.6/hooks"
++     },
++     "roleCommand": "START",
++     "clusterName": "pacan",
++     "hostname": "c6402.ambari.apache.org",
+      "hostLevelParams": {
++         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
++         "ambari_db_rca_password": "mapred",
++         "java_home": "/usr/jdk64/jdk1.7.0_45",
++         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
++         "jce_name": "UnlimitedJCEPolicyJDK7.zip",
++         "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar",
++         "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.2\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0\"},{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.20\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\"}]",
++         "group_list": "[\"hadoop\",\"users\"]",
++         "package_list": "[{\"name\":\"storm_2_2_0_0_*\"}]",
++         "stack_version": "2.2",
++         "stack_name": "HDP",
++         "db_name": "ambari",
++         "ambari_db_rca_driver": "org.postgresql.Driver",
++         "jdk_name": "jdk-7u45-linux-x64.tar.gz",
++         "ambari_db_rca_username": "mapred",
++         "db_driver_filename": "mysql-connector-java.jar",
++         "user_list": "[\"ambari-qa\",\"zookeeper\",\"hdfs\",\"storm\"]",
+          "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar"
++     },
++     "commandType": "EXECUTION_COMMAND",
++     "roleParams": {},
++     "serviceName": "STORM",
++     "role": "NIMBUS",
++     "forceRefreshConfigTags": [],
++     "taskId": 54,
++     "public_hostname": "c6402.ambari.apache.org",
+      "configurations": {
+          "storm-site": {
++             "topology.tuple.serializer": "backtype.storm.serialization.types.ListDelegateSerializer",
++             "topology.workers": "1",
++             "drpc.worker.threads": "64",
++             "storm.zookeeper.servers": "['c6402.ambari.apache.org']",
++             "supervisor.heartbeat.frequency.secs": "5",
++             "topology.executor.send.buffer.size": "1024",
++             "drpc.childopts": "-Xmx768m",
++             "nimbus.thrift.port": "6627",
++             "storm.zookeeper.retry.intervalceiling.millis": "30000",
++             "storm.local.dir": "/hadoop/storm",
++             "topology.receiver.buffer.size": "8",
++             "storm.messaging.netty.client_worker_threads": "1",
++             "transactional.zookeeper.root": "/transactional",
++             "topology.skip.missing.kryo.registrations": "false",
++             "worker.heartbeat.frequency.secs": "1",
++             "zmq.hwm": "0",
++             "storm.zookeeper.connection.timeout": "15000",
++             "_storm.thrift.secure.transport": "SECURED_TRANSPORT_CLASS",
++             "storm.messaging.netty.server_worker_threads": "1",
++             "supervisor.worker.start.timeout.secs": "120",
++             "zmq.threads": "1",
++             "topology.acker.executors": "null",
++             "storm.local.mode.zmq": "false",
++             "topology.max.task.parallelism": "null",
++             "topology.max.error.report.per.interval": "5",
++             "storm.zookeeper.port": "2181",
++             "drpc.queue.size": "128",
++             "worker.childopts": "-Xmx768m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Worker_%ID%_JVM",
++             "nimbus.childopts": "-Xmx1024m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8649,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM",
++             "storm.zookeeper.retry.times": "5",
++             "nimbus.monitor.freq.secs": "10",
++             "storm.cluster.mode": "distributed",
++             "dev.zookeeper.path": "/tmp/dev-storm-zookeeper",
++             "drpc.invocations.port": "3773",
++             "_storm.thrift.nonsecure.transport": "NON_SECURED_TRANSPORT_CLASS",
++             "storm.zookeeper.root": "/storm",
++             "logviewer.childopts": "-Xmx128m",
++             "transactional.zookeeper.port": "null",
++             "topology.worker.childopts": "null",
++             "topology.max.spout.pending": "null",
++             "nimbus.cleanup.inbox.freq.secs": "600",
++             "storm.messaging.netty.min_wait_ms": "100",
++             "nimbus.task.timeout.secs": "30",
++             "nimbus.thrift.max_buffer_size": "1048576",
++             "topology.sleep.spout.wait.strategy.time.ms": "1",
++             "topology.optimize": "true",
++             "nimbus.reassign": "true",
++             "storm.messaging.transport": "backtype.storm.messaging.netty.Context",
++             "logviewer.appender.name": "A1",
++             "nimbus.host": "c6402.ambari.apache.org",
++             "ui.port": "8744",
++             "supervisor.slots.ports": "[6700, 6701]",
++             "nimbus.file.copy.expiration.secs": "600",
++             "supervisor.monitor.frequency.secs": "3",
++             "ui.childopts": "-Xmx768m _JAAS_PLACEHOLDER",
++             "transactional.zookeeper.servers": "null",
++             "zmq.linger.millis": "5000",
++             "topology.error.throttle.interval.secs": "10",
++             "topology.worker.shared.thread.pool.size": "4",
++             "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib:/usr/hdp/current/storm/lib",
++             "topology.spout.wait.strategy": "backtype.storm.spout.SleepSpoutWaitStrategy",
++             "task.heartbeat.frequency.secs": "3",
++             "topology.transfer.buffer.size": "1024",
++             "storm.zookeeper.session.timeout": "20000",
++             "topology.executor.receive.buffer.size": "1024",
++             "topology.stats.sample.rate": "0.05",
++             "topology.fall.back.on.java.serialization": "true",
++             "supervisor.childopts": "-Xmx256m _JAAS_PLACEHOLDER -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port=56431 -javaagent:/usr/hdp/current/storm/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Supervisor_JVM",
++             "topology.enable.message.timeouts": "true",
++             "storm.messaging.netty.max_wait_ms": "1000",
++             "nimbus.topology.validator": "backtype.storm.nimbus.DefaultTopologyValidator",
++             "nimbus.supervisor.timeout.secs": "60",
++             "topology.disruptor.wait.strategy": "com.lmax.disruptor.BlockingWaitStrategy",
++             "nimbus.inbox.jar.expiration.secs": "3600",
++             "drpc.port": "3772",
++             "topology.kryo.factory": "backtype.storm.serialization.DefaultKryoFactory",
++             "storm.zookeeper.retry.interval": "1000",
++             "storm.messaging.netty.max_retries": "30",
++             "topology.tick.tuple.freq.secs": "null",
++             "drpc.request.timeout.secs": "600",
++             "nimbus.task.launch.secs": "120",
++             "task.refresh.poll.secs": "10",
++             "topology.message.timeout.secs": "30",
++             "storm.messaging.netty.buffer_size": "5242880",
++             "topology.state.synchronization.timeout.secs": "60",
++             "supervisor.worker.timeout.secs": "30",
++             "topology.trident.batch.emit.interval.millis": "500",
++             "topology.builtin.metrics.bucket.size.secs": "60",
++             "logviewer.port": "8000",
+              "topology.debug": "false"
++         },
+          "hdfs-site": {
++             "dfs.namenode.avoid.write.stale.datanode": "true",
++             "dfs.namenode.kerberos.internal.spnego.principal": "${dfs.web.authentication.kerberos.principal}",
++             "dfs.namenode.checkpoint.txns": "1000000",
++             "dfs.block.access.token.enable": "true",
++             "dfs.support.append": "true",
++             "dfs.datanode.address": "0.0.0.0:1019",
++             "dfs.cluster.administrators": " hdfs",
++             "dfs.journalnode.kerberos.principal": "jn/_HOST@EXAMPLE.COM",
++             "dfs.datanode.balance.bandwidthPerSec": "6250000",
++             "dfs.namenode.safemode.threshold-pct": "1.0f",
++             "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
++             "dfs.permissions.enabled": "true",
++             "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
++             "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary",
++             "dfs.https.port": "50470",
++             "dfs.namenode.https-address": "c6402.ambari.apache.org:50470",
++             "dfs.secondary.namenode.kerberos.https.principal": "HTTP/_HOST@EXAMPLE.COM",
++             "dfs.blockreport.initialDelay": "120",
++             "dfs.journalnode.edits.dir": "/grid/0/hdfs/journal",
++             "dfs.blocksize": "134217728",
++             "dfs.client.read.shortcircuit": "true",
++             "dfs.datanode.max.transfer.threads": "1024",
++             "dfs.heartbeat.interval": "3",
++             "dfs.replication": "3",
++             "dfs.namenode.handler.count": "40",
++             "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
++             "fs.permissions.umask-mode": "022",
++             "dfs.namenode.stale.datanode.interval": "30000",
++             "dfs.datanode.ipc.address": "0.0.0.0:8010",
++             "dfs.namenode.name.dir": "/hadoop/hdfs/namenode",
++             "dfs.secondary.namenode.kerberos.internal.spnego.principal": "${dfs.web.authentication.kerberos.principal}",
++             "dfs.datanode.data.dir": "/hadoop/hdfs/data",
++             "dfs.namenode.http-address": "c6402.ambari.apache.org:50070",
++             "dfs.webhdfs.enabled": "true",
++             "dfs.datanode.failed.volumes.tolerated": "0",
++             "dfs.namenode.accesstime.precision": "0",
++             "dfs.namenode.avoid.read.stale.datanode": "true",
++             "dfs.namenode.secondary.http-address": "c6402.ambari.apache.org:50090",
++             "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM",
++             "dfs.journalnode.keytab.file": "/etc/security/keytabs/jn.service.keytab",
++             "dfs.datanode.http.address": "0.0.0.0:1022",
++             "dfs.datanode.du.reserved": "1073741824",
++             "dfs.client.read.shortcircuit.streams.cache.size": "4096",
++             "dfs.namenode.kerberos.https.principal": "HTTP/_HOST@EXAMPLE.COM",
++             "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
++             "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
++             "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
++             "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab",
++             "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab",
++             "dfs.permissions.superusergroup": "hdfs",
++             "dfs.journalnode.http-address": "0.0.0.0:8480",
++             "dfs.journalnode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM",
++             "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
++             "dfs.namenode.write.stale.datanode.ratio": "1.0f",
++             "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
++             "dfs.datanode.data.dir.perm": "750",
++             "dfs.namenode.name.dir.restore": "true",
++             "dfs.replication.max": "50",
+              "dfs.namenode.checkpoint.period": "21600",
+              "dfs.http.policy": "HTTP_ONLY"
++         },
+          "storm-env": {
++             "storm_log_dir": "/var/log/storm",
++             "storm_principal_name": "storm@EXAMPLE.COM",
++             "storm_pid_dir": "/var/run/storm",
++             "storm_user": "storm",
++             "content": "\n#!/bin/bash\n\n# Set Storm specific environment variables here.\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}\n\n# export STORM_CONF_DIR=\"\"",
++             "nimbus_principal_name": "nimbus/_HOST@EXAMPLE.COM",
++             "strom_ui_keytab": "/etc/security/keytabs/http.storm.service.keytab",
++             "nimbus_keytab": "/etc/security/keytabs/nimbus.service.keytab",
++             "storm_keytab": "/etc/security/keytabs/storm.service.keytab",
+              "strom_ui_principal_name": "HTTP/_HOST"
++         },
+          "core-site": {
++             "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization",
++             "fs.trash.interval": "360",
++             "hadoop.security.authentication": "kerberos",
++             "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec",
++             "hadoop.proxyuser.falcon.hosts": "*",
++             "mapreduce.jobtracker.webinterface.trusted": "false",
++             "hadoop.security.authorization": "true",
++             "fs.defaultFS": "hdfs://c6402.ambari.apache.org:8020",
++             "ipc.server.tcpnodelay": "true",
++             "ipc.client.connect.max.retries": "50",
++             "ipc.client.idlethreshold": "8000",
++             "io.file.buffer.size": "131072",
++             "hadoop.security.auth_to_local": "RULE:[2:$1@$0](rm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](dn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](jn/_HOST@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](falcon@.*EXAMPLE.COM)s/.*/falcon/\nDEFAULT",
++             "ipc.client.connection.maxidletime": "30000",
+              "hadoop.proxyuser.falcon.groups": "users"
++         },
+          "hadoop-policy": {
++             "security.job.client.protocol.acl": "*",
++             "security.job.task.protocol.acl": "*",
++             "security.datanode.protocol.acl": "*",
++             "security.namenode.protocol.acl": "*",
++             "security.client.datanode.protocol.acl": "*",
++             "security.inter.tracker.protocol.acl": "*",
++             "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
++             "security.client.protocol.acl": "*",
++             "security.refresh.policy.protocol.acl": "hadoop",
++             "security.admin.operations.protocol.acl": "hadoop",
+              "security.inter.datanode.protocol.acl": "*"
++         },
+          "hdfs-log4j": {
+              "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.logger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logger}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# mapred audit logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN"
++         },
+          "hadoop-env": {
+              "namenode_opt_maxnewsize": "200m", 
+              "hdfs_log_dir_prefix": "/var/log/hadoop", 
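
A note on the fixture above: a ".rej" file is the rejected-hunk output that patch(1) leaves behind when context hunks fail to apply, so this artifact records hunks aimed at the sibling secured-storm-start.json fixture -- the command JSON that the Python stack tests feed to the Storm service scripts. Assuming that sibling file parses as plain JSON with the structure visible in the hunks (an assumption; the snippet below is an illustration, not part of this commit), reading it looks like:

  import json

  with open("secured-storm-start.json") as f:
    cmd = json.load(f)

  storm_site = cmd["configurations"]["storm-site"]
  print(cmd["roleCommand"], cmd["role"])    # START NIMBUS
  print(cmd["commandParams"]["script"])     # scripts/nimbus.py
  print(storm_site["nimbus.host"])          # c6402.ambari.apache.org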

http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-server/src/test/python/unitTests.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/unitTests.py b/ambari-server/src/test/python/unitTests.py
index b4e3688..f2ffacb 100644
--- a/ambari-server/src/test/python/unitTests.py
+++ b/ambari-server/src/test/python/unitTests.py
@@ -90,7 +90,8 @@ def stack_test_executor(base_folder, service, stack, custom_tests, executor_resu
 
   tests = get_test_files(base_folder, mask = test_mask)
 
-  shuffle(tests)
+  #TODO Add an option to randomize the tests' execution
+  #shuffle(tests)
   modules = [os.path.basename(s)[:-3] for s in tests]
   suites = [unittest.defaultTestLoader.loadTestsFromName(name) for name in
     modules]
@@ -183,7 +184,8 @@ def main():
     test_mask = TEST_MASK
 
   tests = get_test_files(pwd, mask=test_mask, recursive=False)
-  shuffle(tests)
+  #TODO Add an option to randomize the tests' execution
+  #shuffle(tests)
   modules = [os.path.basename(s)[:-3] for s in tests]
   suites = [unittest.defaultTestLoader.loadTestsFromName(name) for name in
     modules]
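
The TODO above hints at making randomized ordering opt-in rather than deleting it outright. One possible shape for such an option (a sketch only, not part of this commit; the AMBARI_RANDOMIZE_TESTS variable and the maybe_shuffle helper are hypothetical names):

  import os
  from random import shuffle

  def maybe_shuffle(tests):
    """Shuffle the discovered test files only when explicitly requested."""
    if os.environ.get("AMBARI_RANDOMIZE_TESTS", "").lower() in ("1", "true", "yes"):
      shuffle(tests)
    return tests

Both call sites that now carry the commented-out shuffle(tests) could then read, e.g., tests = maybe_shuffle(get_test_files(pwd, mask=test_mask, recursive=False)).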

http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-shell/ambari-python-shell/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-shell/ambari-python-shell/pom.xml b/ambari-shell/ambari-python-shell/pom.xml
index 08d74c0..4381e1b 100644
--- a/ambari-shell/ambari-python-shell/pom.xml
+++ b/ambari-shell/ambari-python-shell/pom.xml
@@ -75,7 +75,7 @@
         <executions>
           <execution>
             <configuration>
-              <executable>${project.basedir}/../../ambari-common/src/main/unix/ambari-python-wrap</executable>
+              <executable>${executable.python}</executable>
               <workingDirectory>target/ambari-python-shell-${project.version}</workingDirectory>
               <arguments>
                 <argument>${project.basedir}/src/main/python/setup.py</argument>
@@ -216,4 +216,30 @@
       </extension>
     </extensions>
   </build>
+  <profiles>
+    <profile>
+      <id>windows</id>
+        <activation>
+        <os>
+          <family>win</family>
+        </os>
+        </activation>
+          <properties>
+            <envClassifier>win</envClassifier>
+            <executable.python>python</executable.python>
+          </properties>
+      </profile>
+      <profile>
+        <id>linux</id>
+        <activation>
+          <os>
+            <family>unix</family>
+          </os>
+        </activation>
+        <properties>
+          <envClassifier>linux</envClassifier>
+          <executable.python>${project.basedir}/../../ambari-common/src/main/unix/ambari-python-wrap</executable.python>
+        </properties>
+      </profile>
+  </profiles>
 </project>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-shell/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-shell/pom.xml b/ambari-shell/pom.xml
index 947c9b9..b23602e 100644
--- a/ambari-shell/pom.xml
+++ b/ambari-shell/pom.xml
@@ -56,6 +56,23 @@
         </executions>
       </plugin>
       <plugin>
+        <groupId>org.apache.rat</groupId>
+        <artifactId>apache-rat-plugin</artifactId>
+        <configuration>
+          <excludes>
+            <exclude>**/*.iml</exclude>
+          </excludes>
+        </configuration>
+        <executions>
+          <execution>
+            <phase>test</phase>
+            <goals>
+              <goal>check</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
         <groupId>org.vafer</groupId>
         <artifactId>jdeb</artifactId>
         <version>1.0.1</version>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-views/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-views/pom.xml b/ambari-views/pom.xml
index fad949a..cecf8a1 100644
--- a/ambari-views/pom.xml
+++ b/ambari-views/pom.xml
@@ -133,6 +133,9 @@
                     <excludes>
                         <exclude>**/*.json</exclude>
                     </excludes>
+                    <excludes>
+                        <exclude>**/*.iml</exclude>
+                    </excludes>
                 </configuration>
                 <executions>
                     <execution>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-web/app/app.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/app.js b/ambari-web/app/app.js
index 77bb21e..20ad4ac 100644
--- a/ambari-web/app/app.js
+++ b/ambari-web/app/app.js
@@ -72,14 +72,14 @@ module.exports = Em.Application.create({
   currentStackVersion: '',
   currentStackName: function() {
     return Em.get((this.get('currentStackVersion') || this.get('defaultStackVersion')).match(/(.+)-\d.+/), '1');
-  }.property('currentStackVersion'),
+  }.property('currentStackVersion', 'defaultStackVersion'),
 
   allHostNames: [],
 
   currentStackVersionNumber: function () {
     var regExp = new RegExp(this.get('currentStackName') + '-');
     return (this.get('currentStackVersion') || this.get('defaultStackVersion')).replace(regExp, '');
-  }.property('currentStackVersion', 'currentStackName'),
+  }.property('currentStackVersion', 'defaultStackVersion', 'currentStackName'),
 
   isHadoop2Stack: function () {
     var result = true;
@@ -96,6 +96,10 @@ module.exports = Em.Application.create({
     return (stringUtils.compareVersions(this.get('currentStackVersionNumber'), "2.2") > -1);
   }.property('currentStackVersionNumber'),
 
+  isHadoopWindowsStack: function() {
+    return this.get('currentStackName') == "HDPWIN";
+  }.property('currentStackName'),
+
   /**
    * If NameNode High Availability is enabled
    * Based on <code>clusterStatus.isInstalled</code>, stack version, <code>SNameNode</code> availability
@@ -155,6 +159,18 @@ module.exports = Em.Application.create({
       return App.StackService.find().filterProperty('isMonitoringService').mapProperty('serviceName');
     }.property('App.router.clusterController.isLoaded'),
 
+    hostMetrics: function () {
+      return App.StackService.find().filterProperty('isHostMetricsService').mapProperty('serviceName');
+    }.property('App.router.clusterController.isLoaded'),
+
+    serviceMetrics: function () {
+      return App.StackService.find().filterProperty('isServiceMetricsService').mapProperty('serviceName');
+    }.property('App.router.clusterController.isLoaded'),
+
+    alerting: function () {
+      return App.StackService.find().filterProperty('isAlertingService').mapProperty('serviceName');
+    }.property('App.router.clusterController.isLoaded'),
+
     supportsServiceCheck: function() {
       return App.StackService.find().filterProperty('serviceCheckSupported').mapProperty('serviceName');
     }.property('App.router.clusterController.isLoaded')

http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-web/app/assets/data/configuration/cluster_env_site.json
----------------------------------------------------------------------
diff --git a/ambari-web/app/assets/data/configuration/cluster_env_site.json b/ambari-web/app/assets/data/configuration/cluster_env_site.json
new file mode 100644
index 0000000..292b0a1
--- /dev/null
+++ b/ambari-web/app/assets/data/configuration/cluster_env_site.json
@@ -0,0 +1,14 @@
+{
+  "href" : "http://c6401.ambari.apache.org:8080/api/v1/clusters/c/configurations?type=cluster-env",
+    "items" : [
+  {
+    "href" : "http://c6401.ambari.apache.org:8080/api/v1/clusters/c/configurations?type=cluster-env&tag=version1",
+    "tag" : "version1",
+    "type" : "cluster-env",
+    "version" : 1,
+    "Config" : {
+      "cluster_name" : "c"
+    }
+  }
+]
+}
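
This mock mirrors the live configurations endpoint. For reference, a hypothetical probe of the same resource against a running server (the host, the cluster name "c", and the admin:admin credentials are placeholders; only the URL shape is taken from the mock above):

  import base64, json
  from urllib.request import Request, urlopen

  url = ("http://c6401.ambari.apache.org:8080"
         "/api/v1/clusters/c/configurations?type=cluster-env")
  auth = "Basic " + base64.b64encode(b"admin:admin").decode()
  # GET requests to the Ambari REST API need only basic auth.
  items = json.load(urlopen(Request(url, headers={"Authorization": auth})))["items"]
  print(items[0]["tag"], items[0]["type"])  # version1 cluster-env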

http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-web/app/config.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/config.js b/ambari-web/app/config.js
index e6f08f2..aa692d0 100644
--- a/ambari-web/app/config.js
+++ b/ambari-web/app/config.js
@@ -28,6 +28,8 @@ App.testEnableSecurity = true; // By default enable security is tested; turning
 App.testNameNodeHA = true;
 App.apiPrefix = '/api/v1';
 App.defaultStackVersion = 'HDP-2.2';
+App.defaultWindowsStackVersion = 'HDPWIN-2.1';
+
 App.defaultJavaHome = '/usr/jdk/jdk1.6.0_31';
 App.timeout = 180000; // default AJAX timeout
 App.maxRetries = 3; // max number of retries for certain AJAX calls

http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-web/app/controllers/global/cluster_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/global/cluster_controller.js b/ambari-web/app/controllers/global/cluster_controller.js
index f72d0cd..bd03394 100644
--- a/ambari-web/app/controllers/global/cluster_controller.js
+++ b/ambari-web/app/controllers/global/cluster_controller.js
@@ -97,6 +97,7 @@ App.ClusterController = Em.Controller.extend({
     var dfd = $.Deferred();
 
     if (App.get('clusterName') && !reload) {
+      App.set('clusterName', this.get('clusterName'));
       dfd.resolve();
     } else {
       App.ajax.send({

http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-web/app/controllers/installer.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/installer.js b/ambari-web/app/controllers/installer.js
index 1fd57b3..73cfb62 100644
--- a/ambari-web/app/controllers/installer.js
+++ b/ambari-web/app/controllers/installer.js
@@ -102,10 +102,6 @@ App.InstallerController = App.WizardController.extend({
     return jQuery.extend({}, this.get('clusterStatusTemplate'));
   },
 
-  getInstallOptions: function () {
-    return jQuery.extend({}, this.get('installOptionsTemplate'));
-  },
-
   getHosts: function () {
     return [];
   },

http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-web/app/controllers/main/admin/serviceAccounts_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/serviceAccounts_controller.js b/ambari-web/app/controllers/main/admin/serviceAccounts_controller.js
index 0551b2c..94221aa 100644
--- a/ambari-web/app/controllers/main/admin/serviceAccounts_controller.js
+++ b/ambari-web/app/controllers/main/admin/serviceAccounts_controller.js
@@ -58,11 +58,11 @@ App.MainAdminServiceAccountsController = App.MainServiceInfoConfigsController.ex
     App.router.get('configurationController').getConfigsByTags(this.get('serviceConfigTags')).done(function (configGroups) {
       var configSet = App.config.mergePreDefinedWithLoaded(configGroups, [], self.get('serviceConfigTags'), serviceName);
 
-      var misc_configs = configSet.configs.filterProperty('serviceName', self.get('selectedService')).filterProperty('category', 'Users and Groups').filterProperty('isVisible', true);
+      var misc_configs = configSet.configs.filterProperty('serviceName', self.get('selectedService')).filterProperty('category', 'Users and Groups').filterProperty('isVisible', true).rejectProperty('displayType', 'password');
 
       misc_configs = App.config.miscConfigVisibleProperty(misc_configs, installedServices);
 
-      var sortOrder = self.get('configs').filterProperty('serviceName', self.get('selectedService')).filterProperty('category', 'Users and Groups').filterProperty('isVisible', true).mapProperty('name');
+      var sortOrder = self.get('configs').filterProperty('serviceName', self.get('selectedService')).filterProperty('category', 'Users and Groups').filterProperty('isVisible', true).rejectProperty('displayType', 'password').mapProperty('name');
 
 
       self.setProxyUserGroupLabel(misc_configs);
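
The change above appends rejectProperty('displayType', 'password') to both filter chains so password-typed properties never surface on the service accounts page. A vanilla-JS analogue of the Ember chain (config objects simplified to plain objects):

// Keep visible "Users and Groups" configs for a service, minus passwords.
function visibleMiscConfigs(configs, serviceName) {
  return configs
    .filter(function (c) { return c.serviceName === serviceName; })
    .filter(function (c) { return c.category === 'Users and Groups'; })
    .filter(function (c) { return c.isVisible; })
    .filter(function (c) { return c.displayType !== 'password'; });
}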

http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-web/app/controllers/main/charts/heatmap.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/charts/heatmap.js b/ambari-web/app/controllers/main/charts/heatmap.js
index 8010ead..324be13 100644
--- a/ambari-web/app/controllers/main/charts/heatmap.js
+++ b/ambari-web/app/controllers/main/charts/heatmap.js
@@ -33,20 +33,25 @@ App.MainChartsHeatmapController = Em.Controller.extend({
   }.property('modelRacks.@each.isLoaded'),
 
   allMetrics: function () {
-    var metrics = [
-      Em.Object.create({
-        label: Em.I18n.t('charts.heatmap.category.host'),
-        category: 'host',
-        items: [
-          App.MainChartHeatmapDiskSpaceUsedMetric.create(),
-          App.MainChartHeatmapMemoryUsedMetric.create(),
-          App.MainChartHeatmapCpuWaitIOMetric.create()
-          /*, App.MainChartHeatmapProcessRunMetric.create()*/
-        ]
-      })
-    ];
-
-    if (App.HDFSService.find().get('length')) {
+    var metrics = [];
+
+    // Display host heatmaps only if the stack definition includes a host metrics service.
+    if (App.get('services.hostMetrics').length) {
+      metrics.push(
+        Em.Object.create({
+          label: Em.I18n.t('charts.heatmap.category.host'),
+          category: 'host',
+          items: [
+            App.MainChartHeatmapDiskSpaceUsedMetric.create(),
+            App.MainChartHeatmapMemoryUsedMetric.create(),
+            App.MainChartHeatmapCpuWaitIOMetric.create()
+            /*, App.MainChartHeatmapProcessRunMetric.create()*/
+          ]
+        })
+      );
+    }
+
+    if (App.HDFSService.find().get('length')) {
       metrics.push(
         Em.Object.create({
           label: Em.I18n.t('charts.heatmap.category.hdfs'),
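
Host heatmaps are now pushed only when the stack declares a host-metrics service, which keeps stacks without one (such as the Windows stack this commit targets) from showing empty host charts. A condensed sketch of the guard pattern, with plain booleans and strings standing in for the Ember objects:

// Build heatmap categories from capability flags rather than unconditionally.
function buildHeatmapCategories(hasHostMetrics, hasHdfs) {
  var categories = [];
  if (hasHostMetrics) {
    categories.push({ category: 'host', items: ['disk', 'memory', 'cpu-wait-io'] });
  }
  if (hasHdfs) {
    categories.push({ category: 'hdfs', items: [] }); // HDFS metrics elided here
  }
  return categories;
}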

http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-web/app/controllers/main/host/add_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/host/add_controller.js b/ambari-web/app/controllers/main/host/add_controller.js
index 001175a..9945aa3 100644
--- a/ambari-web/app/controllers/main/host/add_controller.js
+++ b/ambari-web/app/controllers/main/host/add_controller.js
@@ -79,14 +79,6 @@ App.AddHostController = App.WizardController.extend({
   },
 
   /**
-   * return new object extended from installOptionsTemplate
-   * @return Object
-   */
-  getInstallOptions: function () {
-    return jQuery.extend({}, this.get('installOptionsTemplate'));
-  },
-
-  /**
    * Remove host from model. Used at <code>Confirm hosts</code> step
    * @param hosts Array of hosts, which we want to delete
    */
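
This is the second copy of the duplicated override removed by the commit; both subclasses now inherit the shared implementation from the base wizard controller. The inheritance idea in miniature (a prototype-based sketch, not the Ember mechanism itself):

// Subclasses inherit the copy logic once it lives on the base "class".
function WizardBase() {}
WizardBase.prototype.getInstallOptions = function () {
  return Object.assign({}, this.installOptionsTemplate);
};

function AddHostWizard() {}
AddHostWizard.prototype = Object.create(WizardBase.prototype);
AddHostWizard.prototype.installOptionsTemplate = { manualInstall: false };

// new AddHostWizard().getInstallOptions() => fresh { manualInstall: false }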

http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-web/app/controllers/main/service/info/configs.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/info/configs.js b/ambari-web/app/controllers/main/service/info/configs.js
index 6ed0d72..9333084 100644
--- a/ambari-web/app/controllers/main/service/info/configs.js
+++ b/ambari-web/app/controllers/main/service/info/configs.js
@@ -58,6 +58,9 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
   // note passed on configs save
   serviceConfigVersionNote: '',
   versionLoaded: false,
+  // current cluster-env version
+  clusterEnvTagVersion: '',
+
   isCurrentSelected: function () {
     return App.ServiceConfigVersion.find(this.get('content.serviceName') + "_" + this.get('selectedVersion')).get('isCurrent');
   }.property('selectedVersion', 'content.serviceName'),
@@ -253,6 +256,26 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
   loadStep: function () {
     console.log("TRACE: Loading configure for service");
     this.clearStep();
+    this.loadClusterEnvSite();
+  },
+
+  /**
+   * load all tag versions of cluster-env site
+   * @returns {$.ajax}
+   */
+  loadClusterEnvSite: function () {
+    var self = this;
+    return App.ajax.send({
+      name: 'config.cluster_env_site',
+      sender: self,
+      success: 'loadClusterEnvSiteSuccess'
+    });
+  },
+
+  loadClusterEnvSiteSuccess: function (data) {
+    // find the latest tag version
+    var maxVersion = Math.max.apply(this, data.items.mapProperty('version'));
+    this.set('clusterEnvTagVersion', data.items.findProperty('version', maxVersion).tag);
     this.loadServiceConfigs();
   },
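
loadStep now fetches the cluster-env site first and only then proceeds to loadServiceConfigs, so the latest cluster-env tag is known before service configs are resolved. The same ordering expressed with Promises (fetchClusterEnv stands in for the App.ajax call):

// Record the newest cluster-env tag, then continue loading service configs.
function loadStep(fetchClusterEnv, loadServiceConfigs, state) {
  return fetchClusterEnv().then(function (data) {
    var versions = data.items.map(function (i) { return i.version; });
    var maxVersion = Math.max.apply(null, versions);
    state.clusterEnvTagVersion = data.items.filter(function (i) {
      return i.version === maxVersion;
    })[0].tag;
    return loadServiceConfigs();
  });
}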
 
@@ -407,6 +430,10 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
     }, this);
 
     App.router.get('configurationController').saveToDB(configurations);
+
+    // add cluster-env tag
+    siteToTagMap['cluster-env'] = this.get('clusterEnvTagVersion');
+
     this.loadedClusterSiteToTagMap = siteToTagMap;
     this.set('selectedVersion', selectedVersion);
     //reset map if selected current version of default group
@@ -822,14 +849,87 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
   },
 
   checkDatabaseProperties: function (serviceConfig) {
-    if (!['OOZIE', 'HIVE'].contains(this.get('content.serviceName'))) return;
-    var configsToHide = ['oozie_hostname', 'hive_hostname'];
-    configsToHide.forEach(function (configName) {
-      var property = serviceConfig.configs.findProperty('name', configName);
-      if (property) property.set('isVisible', false);
-    });
+    this.hideSinkDatabaseProperties(serviceConfig.configs);
+    this.hideHiveDatabaseProperties(serviceConfig.configs);
+    this.hideOozieDatabaseProperties(serviceConfig.configs);
+  },
+
+  hideSinkDatabaseProperties: function (configs) {
+    if (!['HDFS'].contains(this.get('content.serviceName'))) return;
+    var property = configs.findProperty('name', 'sink.dbservername');
+    if (property) property.set('isVisible', false);
+    var hadoop_user_property = configs.findProperty('name', 'hadoop.user.name');
+    if (hadoop_user_property) {
+      hadoop_user_property.setProperties({
+        isVisible: false,
+        isRequired: false
+      });
+    }
+
+    var hadoop_password_property = configs.findProperty('name', 'hadoop.user.password');
+    if (hadoop_password_property) {
+      hadoop_password_property.setProperties({
+        isVisible: false,
+        isRequired: false
+      });
+    }
+
+    if (configs.someProperty('name', 'sink_database')) {
+      var sinkDb = configs.findProperty('name', 'sink_database');
+      if (sinkDb.value === 'Existing MSSQL Server database with integrated authentication') {
+        configs.findProperty('name', 'sink.dblogin').setProperties({
+          isVisible: false,
+          isRequired: false
+        });
+        configs.findProperty('name', 'sink.dbpassword').setProperties({
+          isVisible: false,
+          isRequired: false
+        });
+      }
+    }
   },
 
+  hideHiveDatabaseProperties: function (configs) {
+    if (!['HIVE'].contains(this.get('content.serviceName'))) return;
+    var property = configs.findProperty('name', 'hive_hostname');
+    if (property) property.set('isVisible', false);
+
+    if (configs.someProperty('name', 'hive_database')) {
+      var hiveDb = configs.findProperty('name', 'hive_database');
+      if (hiveDb.value === 'Existing MSSQL Server database with integrated authentication') {
+        configs.findProperty('name', 'javax.jdo.option.ConnectionUserName').setProperties({
+          isVisible: false,
+          isRequired: false
+        });
+        configs.findProperty('name', 'javax.jdo.option.ConnectionPassword').setProperties({
+          isVisible: false,
+          isRequired: false
+        });
+      }
+    }
+  },
+
+  hideOozieDatabaseProperties: function (configs) {
+    if (!['OOZIE'].contains(this.get('content.serviceName'))) return;
+    var property = configs.findProperty('name', 'oozie_hostname');
+    if (property) property.set('isVisible', false);
+
+    if (configs.someProperty('name', 'oozie_database')) {
+      var oozieDb = configs.findProperty('name', 'oozie_database');
+      if (oozieDb.value === 'Existing MSSQL Server database with integrated authentication') {
+        configs.findProperty('name', 'oozie.service.JPAService.jdbc.username').setProperties({
+          isVisible: false,
+          isRequired: false
+        });
+        configs.findProperty('name', 'oozie.service.JPAService.jdbc.password').setProperties({
+          isVisible: false,
+          isRequired: false
+        });
+      }
+    }
+  },
 
   onLoadOverrides: function (allConfigs) {
     var serviceName = this.get('content.serviceName');
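
The single checkDatabaseProperties method is split into three per-service helpers above, each hiding the relevant host property and, for the MSSQL-with-integrated-authentication choice, hiding and un-requiring the credential properties as well. The three share one shape, which a generic sketch makes visible (plain objects replace Ember's set/setProperties; the helper and constant names are illustrative):

var MSSQL_INTEGRATED = 'Existing MSSQL Server database with integrated authentication';

// Hide the db host property; hide and un-require credentials for integrated auth.
function hideDbProperties(configs, hostProp, dbProp, credentialProps) {
  var host = configs.filter(function (c) { return c.name === hostProp; })[0];
  if (host) host.isVisible = false;

  var db = configs.filter(function (c) { return c.name === dbProp; })[0];
  if (db && db.value === MSSQL_INTEGRATED) {
    credentialProps.forEach(function (name) {
      var p = configs.filter(function (c) { return c.name === name; })[0];
      if (p) { p.isVisible = false; p.isRequired = false; }
    });
  }
}

// e.g. hideDbProperties(configs, 'hive_hostname', 'hive_database',
//   ['javax.jdo.option.ConnectionUserName', 'javax.jdo.option.ConnectionPassword']);
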
@@ -1462,11 +1562,44 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
     this.set('saveConfigsFlag', true);
     this.onDoPUTClusterConfigurations();
   },
+
+/**
+   * set sink hostnames in configs
+   * @param configs
+   */
+  setSinkHostName: function (configs) {
+    var dbHostPropertyName = null;
+    if (configs.someProperty('name', 'sink_database')) {
+      var sinkDb = configs.findProperty('name', 'sink_database');
+      if (sinkDb.value === 'Existing MSSQL Server database with integrated authentication') {
+        var existingMSSQLServerHost = configs.findProperty('name', 'sink_existing_mssql_server_host');
+        if (existingMSSQLServerHost) {
+           dbHostPropertyName = 'sink_existing_mssql_server_host';
+        }
+        configs = configs.without(configs.findProperty('name', 'sink_existing_mssql_server_2_database'));
+        configs = configs.without(configs.findProperty('name', 'sink_existing_mssql_server_2_host'));
+      } else if (sinkDb.value === 'Existing MSSQL Server database with sql auth') {
+        var existingMSSQL2ServerHost = configs.findProperty('name', 'sink_existing_mssql_server_2_host');
+        if (existingMSSQL2ServerHost) {
+           dbHostPropertyName = 'sink_existing_mssql_server_2_host';
+        }
+        configs = configs.without(configs.findProperty('name', 'sink_existing_mssql_server_database'));
+        configs = configs.without(configs.findProperty('name', 'sink_existing_mssql_server_host'));
+      }
+    }
+    if (dbHostPropertyName) {
+      var sinkHostNameProperty = App.ServiceConfigProperty.create(App.config.get('preDefinedSiteProperties').findProperty('name', 'sink.dbservername'));
+      sinkHostNameProperty.set('value', configs.findProperty('name', dbHostPropertyName).get('value'));
+      configs.pushObject(sinkHostNameProperty);
+    }
+  },
+
   /**
    * set hive hostnames in configs
    * @param configs
    */
   setHiveHostName: function (configs) {
+    var dbHostPropertyName = null;
     if (configs.someProperty('name', 'hive_database')) {
       var hiveDb = configs.findProperty('name', 'hive_database');
       if (hiveDb.value === 'New MySQL Database' || hiveDb.value === 'New PostgreSQL Database') {
@@ -1480,10 +1613,14 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
         configs = configs.without(configs.findProperty('name', 'hive_existing_oracle_database'));
         configs = configs.without(configs.findProperty('name', 'hive_existing_postgresql_host'));
         configs = configs.without(configs.findProperty('name', 'hive_existing_postgresql_database'));
+        configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_database'));
+        configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_host'));
+        configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_2_database'));
+        configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_2_host'));
       } else if (hiveDb.value === 'Existing MySQL Database') {
         var existingMySqlHost = configs.findProperty('name', 'hive_existing_mysql_host');
         if (existingMySqlHost) {
-          existingMySqlHost.name = 'hive_hostname';
+          dbHostPropertyName = 'hive_existing_mysql_host';
         }
         configs = configs.without(configs.findProperty('name', 'hive_ambari_host'));
         configs = configs.without(configs.findProperty('name', 'hive_ambari_database'));
@@ -1491,10 +1628,14 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
         configs = configs.without(configs.findProperty('name', 'hive_existing_oracle_database'));
         configs = configs.without(configs.findProperty('name', 'hive_existing_postgresql_host'));
         configs = configs.without(configs.findProperty('name', 'hive_existing_postgresql_database'));
+        configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_database'));
+        configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_host'));
+        configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_2_database'));
+        configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_2_host'));
       } else if (hiveDb.value === Em.I18n.t('services.service.config.hive.oozie.postgresql')) {
         var existingPostgreSqlHost = configs.findProperty('name', 'hive_existing_postgresql_host');
         if (existingPostgreSqlHost) {
-          existingPostgreSqlHost.name = 'hive_hostname';
+          dbHostPropertyName = 'hive_existing_postgresql_host';
         }
         configs = configs.without(configs.findProperty('name', 'hive_ambari_host'));
         configs = configs.without(configs.findProperty('name', 'hive_ambari_database'));
@@ -1502,11 +1643,29 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
         configs = configs.without(configs.findProperty('name', 'hive_existing_mysql_database'));
         configs = configs.without(configs.findProperty('name', 'hive_existing_oracle_host'));
         configs = configs.without(configs.findProperty('name', 'hive_existing_oracle_database'));
-      }
-      else { //existing oracle database
+        configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_database'));
+        configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_host'));
+        configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_2_database'));
+        configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_2_host'));
+      } else if (hiveDb.value === 'Existing Oracle Database') {
         var existingOracleHost = configs.findProperty('name', 'hive_existing_oracle_host');
         if (existingOracleHost) {
-          existingOracleHost.name = 'hive_hostname';
+          dbHostPropertyName = 'hive_existing_oracle_host';
+        }
+        configs = configs.without(configs.findProperty('name', 'hive_ambari_host'));
+        configs = configs.without(configs.findProperty('name', 'hive_ambari_database'));
+        configs = configs.without(configs.findProperty('name', 'hive_existing_mysql_host'));
+        configs = configs.without(configs.findProperty('name', 'hive_existing_mysql_database'));
+        configs = configs.without(configs.findProperty('name', 'hive_existing_postgresql_host'));
+        configs = configs.without(configs.findProperty('name', 'hive_existing_postgresql_database'));
+        configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_database'));
+        configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_host'));
+        configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_2_database'));
+        configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_2_host'));
+      } else if (hiveDb.value === 'Existing MSSQL Server database with integrated authentication') {
+        var existingMSSQLServerHost = configs.findProperty('name', 'hive_existing_mssql_server_host');
+        if (existingMSSQLServerHost) {
+          dbHostPropertyName = 'hive_existing_mssql_server_host';
         }
         configs = configs.without(configs.findProperty('name', 'hive_ambari_host'));
         configs = configs.without(configs.findProperty('name', 'hive_ambari_database'));
@@ -1514,8 +1673,31 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
         configs = configs.without(configs.findProperty('name', 'hive_existing_mysql_database'));
         configs = configs.without(configs.findProperty('name', 'hive_existing_postgresql_host'));
         configs = configs.without(configs.findProperty('name', 'hive_existing_postgresql_database'));
+        configs = configs.without(configs.findProperty('name', 'hive_existing_oracle_host'));
+        configs = configs.without(configs.findProperty('name', 'hive_existing_oracle_database'));
+        configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_2_database'));
+        configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_2_host'));
+      } else if (hiveDb.value === 'Existing MSSQL Server database with sql auth') {
+        var existingMSSQL2ServerHost = configs.findProperty('name', 'hive_existing_mssql_server_2_host');
+        if (existingMSSQL2ServerHost) {
+          dbHostPropertyName = 'hive_existing_mssql_server_2_host';
+        }
+        configs = configs.without(configs.findProperty('name', 'hive_ambari_host'));
+        configs = configs.without(configs.findProperty('name', 'hive_ambari_database'));
+        configs = configs.without(configs.findProperty('name', 'hive_existing_mysql_host'));
+        configs = configs.without(configs.findProperty('name', 'hive_existing_mysql_database'));
+        configs = configs.without(configs.findProperty('name', 'hive_existing_postgresql_host'));
+        configs = configs.without(configs.findProperty('name', 'hive_existing_postgresql_database'));
+        configs = configs.without(configs.findProperty('name', 'hive_existing_oracle_host'));
+        configs = configs.without(configs.findProperty('name', 'hive_existing_oracle_database'));
+        configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_database'));
+        configs = configs.without(configs.findProperty('name', 'hive_existing_mssql_server_host'));
       }
-
+    }
+    if (dbHostPropertyName) {
+      var hiveHostNameProperty = App.ServiceConfigProperty.create(App.config.get('preDefinedSiteProperties').findProperty('name', 'hive_hostname'));
+      hiveHostNameProperty.set('value', configs.findProperty('name', dbHostPropertyName).get('value'));
+      configs.pushObject(hiveHostNameProperty);
     }
   },
 
@@ -1536,6 +1718,10 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
         configs = configs.without(configs.findProperty('name', 'oozie_existing_oracle_database'));
         configs = configs.without(configs.findProperty('name', 'oozie_existing_postgresql_host'));
         configs = configs.without(configs.findProperty('name', 'oozie_existing_postgresql_database'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_database'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_host'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_2_database'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_2_host'));
       } else if (oozieDb.value === 'New MySQL Database') {
         var ambariHost = configs.findProperty('name', 'oozie_ambari_host');
         if (ambariHost) {
@@ -1548,6 +1734,10 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
         configs = configs.without(configs.findProperty('name', 'oozie_derby_database'));
         configs = configs.without(configs.findProperty('name', 'oozie_existing_postgresql_host'));
         configs = configs.without(configs.findProperty('name', 'oozie_existing_postgresql_database'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_database'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_host'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_2_database'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_2_host'));
 
       } else if (oozieDb.value === 'Existing MySQL Database') {
         var existingMySqlHost = configs.findProperty('name', 'oozie_existing_mysql_host');
@@ -1561,8 +1751,14 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
         configs = configs.without(configs.findProperty('name', 'oozie_derby_database'));
         configs = configs.without(configs.findProperty('name', 'oozie_existing_postgresql_host'));
         configs = configs.without(configs.findProperty('name', 'oozie_existing_postgresql_database'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_database'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_host'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_2_database'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_2_host'));
       } else if (oozieDb.value === Em.I18n.t('services.service.config.hive.oozie.postgresql')) {
         var existingPostgreSqlHost = configs.findProperty('name', 'oozie_existing_postgresql_host');
+
+
         if (existingPostgreSqlHost) {
           dbHostPropertyName = 'oozie_existing_postgresql_host';
         }
@@ -1572,8 +1768,12 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
         configs = configs.without(configs.findProperty('name', 'oozie_existing_mysql_database'));
         configs = configs.without(configs.findProperty('name', 'oozie_existing_oracle_host'));
         configs = configs.without(configs.findProperty('name', 'oozie_existing_oracle_database'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_database'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_host'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_2_database'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_2_host'));
       }
-      else { //existing oracle database
+      else if (oozieDb.value === 'Existing Oracle Database') {
         var existingOracleHost = configs.findProperty('name', 'oozie_existing_oracle_host');
         if (existingOracleHost) {
           dbHostPropertyName = 'oozie_existing_oracle_host';
@@ -1583,8 +1783,43 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
         configs = configs.without(configs.findProperty('name', 'oozie_existing_mysql_host'));
         configs = configs.without(configs.findProperty('name', 'oozie_existing_mysql_database'));
         configs = configs.without(configs.findProperty('name', 'oozie_derby_database'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_database'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_host'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_2_database'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_2_host'));
+      } else if (oozieDb.value === 'Existing MSSQL Server database with integrated authentication') {
+        var existingMSSQLServerHost = configs.findProperty('name', 'oozie_existing_mssql_server_host');
+        if (existingMSSQLServerHost) {
+          dbHostPropertyName = 'oozie_existing_mssql_server_host';
+        }
+        configs = configs.without(configs.findProperty('name', 'oozie_ambari_host'));
+        configs = configs.without(configs.findProperty('name', 'oozie_ambari_database'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_oracle_host'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_oracle_database'));
+        configs = configs.without(configs.findProperty('name', 'oozie_derby_database'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_postgresql_host'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_postgresql_database'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_mysql_host'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_mysql_database'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_2_database'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_2_host'));
+      } else if (oozieDb.value === 'Existing MSSQL Server database with sql auth') {
+        var existingMSSQL2ServerHost = configs.findProperty('name', 'oozie_existing_mssql_server_2_host');
+        if (existingMSSQL2ServerHost) {
+          dbHostPropertyName = 'oozie_existing_mssql_server_2_host';
+        }
+        configs = configs.without(configs.findProperty('name', 'oozie_ambari_host'));
+        configs = configs.without(configs.findProperty('name', 'oozie_ambari_database'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_oracle_host'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_oracle_database'));
+        configs = configs.without(configs.findProperty('name', 'oozie_derby_database'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_postgresql_host'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_postgresql_database'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_mysql_host'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_mysql_database'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_database'));
+        configs = configs.without(configs.findProperty('name', 'oozie_existing_mssql_server_host'));
       }
-
     }
 
     if (dbHostPropertyName) {
@@ -1602,6 +1837,7 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
     //storedConfigs contains custom configs as well
     this.setHiveHostName(configs);
     this.setOozieHostName(configs);
+    this.setSinkHostName(configs);
     this.formatConfigValues(configs);
     var mappedConfigs = App.config.excludeUnsupportedConfigs(this.get('configMapping').all(), App.Service.find().mapProperty('serviceName'));
     var allUiConfigs = this.loadUiSideConfigs(mappedConfigs);
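
The long setSinkHostName/setHiveHostName/setOozieHostName branches above all follow one pattern: find the host property matching the selected database value, prune the properties belonging to every other database choice, then append a synthesized hostname property carrying the chosen host. Condensed to a table-driven, framework-free sketch (the mapping shows a few of the hive entries; the full set mirrors the branches above):

// Map each database choice to the config property that holds its host.
var HIVE_DB_HOST_PROPS = {
  'Existing MySQL Database': 'hive_existing_mysql_host',
  'Existing Oracle Database': 'hive_existing_oracle_host',
  'Existing MSSQL Server database with sql auth': 'hive_existing_mssql_server_2_host'
  // ...one entry per supported database value
};

// Return the host value for the selected database, or null if absent.
function resolveDbHost(configs, dbValue) {
  var hostProp = HIVE_DB_HOST_PROPS[dbValue];
  var host = configs.filter(function (c) { return c.name === hostProp; })[0];
  return host ? host.value : null;
}
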
@@ -2158,16 +2394,24 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
 
     if (serviceName === 'HIVE') {
       var hiveDb = configs.findProperty('name', 'hive_database').value;
-      if (['Existing MySQL Database', 'Existing Oracle Database', 'Existing PostgreSQL Database'].contains(hiveDb)) {
+      if (['Existing MySQL Database', 'Existing Oracle Database', 'Existing PostgreSQL Database', 'Existing MSSQL Server database with integrated authentication', 'Existing MSSQL Server database with sql auth'].contains(hiveDb)) {
         configs.findProperty('name', 'hive_hostname').isVisible = true;
       }
     }
     if (serviceName === 'OOZIE') {
       var oozieDb = configs.findProperty('name', 'oozie_database').value;
-      if (['Existing MySQL Database', 'Existing Oracle Database', 'Existing PostgreSQL Database'].contains(oozieDb)) {
+      if (['Existing MySQL Database', 'Existing Oracle Database', 'Existing PostgreSQL Database', 'Existing MSSQL Server database with integrated authentication', 'Existing MSSQL Server database with sql auth'].contains(oozieDb)) {
         configs.findProperty('name', 'oozie_hostname').isVisible = true;
       }
     }
+    if (App.get('isHadoopWindowsStack')) {
+      if (serviceName === 'HDFS') {
+        var sinkDB = configs.findProperty('name', 'sink_database').value;
+        if (['Existing MSSQL Server database with integrated authentication', 'Existing MSSQL Server database with sql auth'].contains(sinkDB)) {
+          configs.findProperty('name', 'sink.dbservername').isVisible = true;
+        }
+      }
+    }
   },
   /**
    * set host name(s) property for component
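
The final hunk above widens the "existing database" lists so the two MSSQL variants also re-show the hostname field, and adds a Windows-stack-only branch for the HDFS sink database. The visibility rule reduces to list membership:

// Hostname fields are shown only for "existing" database selections.
var EXISTING_DBS = [
  'Existing MySQL Database',
  'Existing Oracle Database',
  'Existing PostgreSQL Database',
  'Existing MSSQL Server database with integrated authentication',
  'Existing MSSQL Server database with sql auth'
];

function shouldShowDbHostname(dbValue) {
  return EXISTING_DBS.indexOf(dbValue) !== -1;
}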

http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-web/app/controllers/wizard.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard.js b/ambari-web/app/controllers/wizard.js
index d35d357..94ac83d 100644
--- a/ambari-web/app/controllers/wizard.js
+++ b/ambari-web/app/controllers/wizard.js
@@ -281,7 +281,7 @@ App.WizardController = Em.Controller.extend(App.LocalStorage, {
    * Remove all data for installOptions step
    */
   clearInstallOptions: function () {
-    var installOptions = jQuery.extend({}, this.get('installOptionsTemplate'));
+    var installOptions = this.get('getInstallOptions');
     this.set('content.installOptions', installOptions);
     this.setDBProperty('installOptions', installOptions);
     this.set('content.hosts', {});
@@ -503,6 +503,10 @@ App.WizardController = Em.Controller.extend(App.LocalStorage, {
     }, this);
   },
 
+  getInstallOptions: function() {
+    return jQuery.extend({}, App.get('isHadoopWindowsStack') ? this.get('installWindowsOptionsTemplate') : this.get('installOptionsTemplate'));
+  }.property('App.isHadoopWindowsStack'),
+
   installOptionsTemplate: {
     hostNames: "", //string
     manualInstall: false, //true, false
@@ -514,6 +518,17 @@ App.WizardController = Em.Controller.extend(App.LocalStorage, {
     sshUser: "root" //string
   },
 
+  installWindowsOptionsTemplate: {
+    hostNames: "", //string
+    manualInstall: true, //true, false
+    useSsh: false, //bool
+    javaHome: App.defaultJavaHome, //string
+    localRepo: false, //true, false
+    sshKey: "", //string
+    bootRequestId: null, //string
+    sshUser: "" //string
+  },
+
   loadedServiceComponents: null,
 
   /**
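
clearInstallOptions now reads the platform-aware computed property instead of copying installOptionsTemplate directly, and the new installWindowsOptionsTemplate defaults to manual installation with SSH disabled. Stripped of Ember, the selection logic is just:

// Pick the platform template and hand back a shallow copy callers may mutate.
function getInstallOptions(isHadoopWindowsStack, linuxTemplate, windowsTemplate) {
  return Object.assign({}, isHadoopWindowsStack ? windowsTemplate : linuxTemplate);
}

// On the Windows stack the copy starts with manualInstall: true and
// useSsh: false, matching installWindowsOptionsTemplate above.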

http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-web/app/controllers/wizard/step2_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard/step2_controller.js b/ambari-web/app/controllers/wizard/step2_controller.js
index 08dcd12..5c38a9a 100644
--- a/ambari-web/app/controllers/wizard/step2_controller.js
+++ b/ambari-web/app/controllers/wizard/step2_controller.js
@@ -113,6 +113,10 @@ App.WizardStep2Controller = Em.Controller.extend({
    */
   hostsError: null,
 
+  isSSHRegistrationEnabled: function () {
+    return !App.get('isHadoopWindowsStack');
+  }.property('App.isHadoopWindowsStack'),
+
   /**
    * Error-message if <code>sshKey</code> is empty, null otherwise
    * @type {string|null}
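
isSSHRegistrationEnabled, added above, is the single flag the rest of step 2 consults: SSH-based host registration is available exactly when the cluster is not on the Windows stack, and the manual-install warning below fires only when SSH registration is possible but the user declined it. As a decision sketch:

// Warn about manual install only when SSH registration exists and was declined.
function shouldWarnManualInstall(isHadoopWindowsStack, useSsh) {
  var sshRegistrationEnabled = !isHadoopWindowsStack;
  return sshRegistrationEnabled && !useSsh;
}
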
@@ -469,7 +473,7 @@ App.WizardStep2Controller = Em.Controller.extend({
    * @method manualInstallWarningPopup
    */
   manualInstallWarningPopup: function () {
-    if (!this.get('content.installOptions.useSsh')) {
+    if (this.get('isSSHRegistrationEnabled') && !this.get('content.installOptions.useSsh')) {
       App.ModalPopup.show({
         header: Em.I18n.t('common.warning'),
         body: Em.I18n.t('installer.step2.manualInstall.info'),