Posted to commits@ambari.apache.org by ao...@apache.org on 2014/02/17 18:18:55 UTC

[1/5] git commit: AMBARI-4627. unittest TemplateConfig (Ivan Kozlov via aonishuk)

Repository: ambari
Updated Branches:
  refs/heads/trunk 33bf1d44c -> 88c2472e5


AMBARI-4627. unittest TemplateConfig (Ivan Kozlov via aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9e69e1eb
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9e69e1eb
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9e69e1eb

Branch: refs/heads/trunk
Commit: 9e69e1eb525991dfebb8bb58cabfec6dd3d1998f
Parents: 1367ceb
Author: Andrew Onischuk <ao...@hortonworks.com>
Authored: Mon Feb 17 08:53:07 2014 -0800
Committer: Andrew Onischuk <ao...@hortonworks.com>
Committed: Mon Feb 17 09:18:37 2014 -0800

----------------------------------------------------------------------
 .../TestTemplateConfigResource.py               | 58 ++++++++++++++++++++
 1 file changed, 58 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9e69e1eb/ambari-agent/src/test/python/resource_management/TestTemplateConfigResource.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/resource_management/TestTemplateConfigResource.py b/ambari-agent/src/test/python/resource_management/TestTemplateConfigResource.py
new file mode 100644
index 0000000..db88365
--- /dev/null
+++ b/ambari-agent/src/test/python/resource_management/TestTemplateConfigResource.py
@@ -0,0 +1,58 @@
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+from unittest import TestCase
+from mock.mock import patch
+from resource_management import *
+from resource_management.libraries.resources.template_config \
+  import TemplateConfig
+
+
+class TestTemplateConfigResource(TestCase):
+
+  @patch("resource_management.libraries.providers.template_config.Template")
+  @patch("resource_management.libraries.providers.template_config.File")
+  def test_create_template_wo_tag(self, file_mock, template_mock):
+    with Environment() as env:
+      TemplateConfig("path",
+                     action="create",
+                     mode=0755,
+                     owner="owner",
+                     group="group",
+                     extra_imports=["extra_imports"]
+      )
+      defined_arguments = env.resources['TemplateConfig']['path'].arguments
+      expected_arguments = {'group': 'group', 'extra_imports': ['extra_imports'], 'action': ['create'], 'mode': 0755, 'owner': 'owner'}
+      self.assertEqual(defined_arguments,expected_arguments)
+      self.assertEqual(file_mock.call_args[0][0],'path')
+      call_args = file_mock.call_args[1].copy()
+      del call_args['content']
+      self.assertEqual(call_args,{'owner': 'owner', 'group': 'group', 'mode': 0755})
+      self.assertEqual(template_mock.call_args[0][0],'path.j2')
+      self.assertEqual(template_mock.call_args[1],{'extra_imports': ['extra_imports']})
+
+
+  @patch("resource_management.libraries.providers.template_config.Template")
+  @patch("resource_management.core.providers.system.FileProvider")
+  def test_create_template_with_tag(self, file_mock, template_mock):
+    with Environment("/") as env:
+      TemplateConfig("path",
+                     action="create",
+                     template_tag="template_tag"
+      )
+      self.assertEqual(template_mock.call_args[0][0],'path-template_tag.j2')
\ No newline at end of file
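

A note for readers skimming the patch: the two mocks pin down the naming convention the TemplateConfig provider is expected to follow. The sketch below is inferred from the test assertions alone and is not copied from the actual provider in resource_management.libraries.providers.template_config: TemplateConfig("path", ...) should render "path.j2", or "path-<template_tag>.j2" when a tag is given, and hand the result to a File resource carrying the same owner, group and mode.

# Hedged sketch, inferred from the assertions above; the real provider may differ.
def expected_template_config_calls(name, template_tag=None):
    # Returns (template rendered, file written) for a TemplateConfig resource.
    if template_tag is None:
        return "%s.j2" % name, name
    return "%s-%s.j2" % (name, template_tag), name

# Mirrors the two assertions in the test:
assert expected_template_config_calls("path") == ("path.j2", "path")
assert expected_template_config_calls("path", "template_tag") == ("path-template_tag.j2", "path")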


[2/5] git commit: AMBARI-4700. Oozie tests fail (Ivan Kozlov via aonishuk)

Posted by ao...@apache.org.
AMBARI-4700. Oozie tests fail (Ivan Kozlov via aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1367ceb2
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1367ceb2
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1367ceb2

Branch: refs/heads/trunk
Commit: 1367ceb2300d4778b2dfbef654e864e7138759dd
Parents: a5a0203
Author: Andrew Onischuk <ao...@hortonworks.com>
Authored: Mon Feb 17 08:43:07 2014 -0800
Committer: Andrew Onischuk <ao...@hortonworks.com>
Committed: Mon Feb 17 09:18:37 2014 -0800

----------------------------------------------------------------------
 .../stacks/1.3.2/OOZIE/test_oozie_client.py     | 14 ++++++++++
 .../stacks/1.3.2/OOZIE/test_oozie_server.py     | 28 ++++++++++++++++++++
 .../python/stacks/1.3.2/configs/default.json    |  3 +++
 .../python/stacks/1.3.2/configs/secured.json    |  3 +++
 .../stacks/2.0.6/OOZIE/test_oozie_client.py     | 14 ++++++++++
 .../stacks/2.0.6/OOZIE/test_oozie_server.py     | 14 ++++++++++
 .../python/stacks/2.0.6/configs/default.json    |  3 +++
 .../python/stacks/2.0.6/configs/secured.json    |  3 +++
 8 files changed, 82 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/1367ceb2/ambari-server/src/test/python/stacks/1.3.2/OOZIE/test_oozie_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/OOZIE/test_oozie_client.py b/ambari-server/src/test/python/stacks/1.3.2/OOZIE/test_oozie_client.py
index e60b1ad..eed555d 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/OOZIE/test_oozie_client.py
+++ b/ambari-server/src/test/python/stacks/1.3.2/OOZIE/test_oozie_client.py
@@ -42,6 +42,13 @@ class TestOozieClient(RMFTestCase):
     self.assertResourceCalled('TemplateConfig', '/etc/oozie/conf/oozie-env.sh',
       owner = 'oozie',
     )
+    self.assertResourceCalled('PropertiesFile', 'oozie-log4j.properties',
+      owner = 'oozie',
+      group = 'hadoop',
+      mode = 0664,
+      dir = '/etc/oozie/conf',
+      properties = self.getConfig()['configurations']['oozie-log4j'],
+    )
     self.assertResourceCalled('File', '/etc/oozie/conf/adminusers.txt',
       owner = 'oozie',
       group = 'hadoop',
@@ -85,6 +92,13 @@ class TestOozieClient(RMFTestCase):
     self.assertResourceCalled('TemplateConfig', '/etc/oozie/conf/oozie-env.sh',
       owner = 'oozie',
     )
+    self.assertResourceCalled('PropertiesFile', 'oozie-log4j.properties',
+      owner = 'oozie',
+      group = 'hadoop',
+      mode = 0664,
+      dir = '/etc/oozie/conf',
+      properties = self.getConfig()['configurations']['oozie-log4j'],
+    )
     self.assertResourceCalled('File', '/etc/oozie/conf/adminusers.txt',
       owner = 'oozie',
       group = 'hadoop',
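
The new assertions above expect the Oozie recipes to declare the log4j configuration as a PropertiesFile resource. A hedged sketch of the recipe-side call this corresponds to, written from the assertion alone (the actual Oozie recipe code is not part of this diff, and params stands for the recipe's usual params module), would be:

PropertiesFile('oozie-log4j.properties',
               owner='oozie',
               group='hadoop',
               mode=0664,
               dir='/etc/oozie/conf',
               properties=params.config['configurations']['oozie-log4j'],
               )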

http://git-wip-us.apache.org/repos/asf/ambari/blob/1367ceb2/ambari-server/src/test/python/stacks/1.3.2/OOZIE/test_oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/OOZIE/test_oozie_server.py b/ambari-server/src/test/python/stacks/1.3.2/OOZIE/test_oozie_server.py
index 7093ac7..b70dda3 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/OOZIE/test_oozie_server.py
+++ b/ambari-server/src/test/python/stacks/1.3.2/OOZIE/test_oozie_server.py
@@ -52,6 +52,13 @@ class TestOozieServer(RMFTestCase):
     self.assertResourceCalled('TemplateConfig', '/etc/oozie/conf/oozie-env.sh',
       owner = 'oozie',
     )
+    self.assertResourceCalled('PropertiesFile', 'oozie-log4j.properties',
+      owner = 'oozie',
+      group = 'hadoop',
+      mode = 0664,
+      dir = '/etc/oozie/conf',
+      properties = self.getConfig()['configurations']['oozie-log4j'],
+    )
     self.assertResourceCalled('File', '/etc/oozie/conf/adminusers.txt',
       owner = 'oozie',
       group = 'hadoop',
@@ -183,6 +190,13 @@ class TestOozieServer(RMFTestCase):
     self.assertResourceCalled('TemplateConfig', '/etc/oozie/conf/oozie-env.sh',
       owner = 'oozie',
     )
+    self.assertResourceCalled('PropertiesFile', 'oozie-log4j.properties',
+      owner = 'oozie',
+      group = 'hadoop',
+      mode = 0664,
+      dir = '/etc/oozie/conf',
+      properties = self.getConfig()['configurations']['oozie-log4j'],
+    )
     self.assertResourceCalled('File', '/etc/oozie/conf/adminusers.txt',
       owner = 'oozie',
       group = 'hadoop',
@@ -308,6 +322,13 @@ class TestOozieServer(RMFTestCase):
     self.assertResourceCalled('TemplateConfig', '/etc/oozie/conf/oozie-env.sh',
                               owner = 'oozie',
                               )
+    self.assertResourceCalled('PropertiesFile', 'oozie-log4j.properties',
+                              owner = 'oozie',
+                              group = 'hadoop',
+                              mode = 0664,
+                              dir = '/etc/oozie/conf',
+                              properties = self.getConfig()['configurations']['oozie-log4j'],
+                              )
     self.assertResourceCalled('File', '/etc/oozie/conf/adminusers.txt',
                               owner = 'oozie',
                               group = 'hadoop',
@@ -397,6 +418,13 @@ class TestOozieServer(RMFTestCase):
     self.assertResourceCalled('TemplateConfig', '/etc/oozie/conf/oozie-env.sh',
                               owner = 'oozie',
                               )
+    self.assertResourceCalled('PropertiesFile', 'oozie-log4j.properties',
+                              owner = 'oozie',
+                              group = 'hadoop',
+                              mode = 0664,
+                              dir = '/etc/oozie/conf',
+                              properties = self.getConfig()['configurations']['oozie-log4j'],
+                              )
     self.assertResourceCalled('File', '/etc/oozie/conf/adminusers.txt',
                               owner = 'oozie',
                               group = 'hadoop',

http://git-wip-us.apache.org/repos/asf/ambari/blob/1367ceb2/ambari-server/src/test/python/stacks/1.3.2/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/configs/default.json b/ambari-server/src/test/python/stacks/1.3.2/configs/default.json
index 299cc99..70a3132 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/configs/default.json
+++ b/ambari-server/src/test/python/stacks/1.3.2/configs/default.json
@@ -370,6 +370,9 @@
         },
         "pig-log4j": {
             "property1": "value1"
+        },
+        "oozie-log4j": {
+            "property1": "value1"
         }
     }, 
     "configurationTags": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/1367ceb2/ambari-server/src/test/python/stacks/1.3.2/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/configs/secured.json b/ambari-server/src/test/python/stacks/1.3.2/configs/secured.json
index c96ea60..7e3d39d 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/1.3.2/configs/secured.json
@@ -475,6 +475,9 @@
         },
         "pig-log4j": {
             "property1": "value1"
+        },
+        "oozie-log4j": {
+            "property1": "value1"
         }
     }, 
     "configurationTags": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/1367ceb2/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py
index 358cab9..bc84440 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py
@@ -45,6 +45,13 @@ class TestOozieClient(RMFTestCase):
     self.assertResourceCalled('TemplateConfig', '/etc/oozie/conf/oozie-env.sh',
         owner = 'oozie',
         )
+    self.assertResourceCalled('PropertiesFile', 'oozie-log4j.properties',
+        owner = 'oozie',
+        group = 'hadoop',
+        mode = 0664,
+        dir = '/etc/oozie/conf',
+        properties = self.getConfig()['configurations']['oozie-log4j'],
+        )
     self.assertResourceCalled('File', '/etc/oozie/conf/adminusers.txt',
         owner = 'oozie',
         group = 'hadoop',
@@ -91,6 +98,13 @@ class TestOozieClient(RMFTestCase):
     self.assertResourceCalled('TemplateConfig', '/etc/oozie/conf/oozie-env.sh',
                               owner = 'oozie',
                               )
+    self.assertResourceCalled('PropertiesFile', 'oozie-log4j.properties',
+                              owner = 'oozie',
+                              group = 'hadoop',
+                              mode = 0664,
+                              dir = '/etc/oozie/conf',
+                              properties = self.getConfig()['configurations']['oozie-log4j'],
+                              )
     self.assertResourceCalled('File', '/etc/oozie/conf/adminusers.txt',
                               owner = 'oozie',
                               group = 'hadoop',

http://git-wip-us.apache.org/repos/asf/ambari/blob/1367ceb2/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
index 8e35e23..6b438a6 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
@@ -138,6 +138,13 @@ class TestOozieServer(RMFTestCase):
     self.assertResourceCalled('TemplateConfig', '/etc/oozie/conf/oozie-env.sh',
                               owner = 'oozie',
                               )
+    self.assertResourceCalled('PropertiesFile', 'oozie-log4j.properties',
+                              owner = 'oozie',
+                              group = 'hadoop',
+                              mode = 0664,
+                              dir = '/etc/oozie/conf',
+                              properties = self.getConfig()['configurations']['oozie-log4j'],
+                              )
     self.assertResourceCalled('File', '/etc/oozie/conf/adminusers.txt',
                               owner = 'oozie',
                               group = 'hadoop',
@@ -231,6 +238,13 @@ class TestOozieServer(RMFTestCase):
     self.assertResourceCalled('TemplateConfig', '/etc/oozie/conf/oozie-env.sh',
                               owner = 'oozie',
                               )
+    self.assertResourceCalled('PropertiesFile', 'oozie-log4j.properties',
+                              owner = 'oozie',
+                              group = 'hadoop',
+                              mode = 0664,
+                              dir = '/etc/oozie/conf',
+                              properties = self.getConfig()['configurations']['oozie-log4j'],
+                              )
     self.assertResourceCalled('File', '/etc/oozie/conf/adminusers.txt',
                               owner = 'oozie',
                               group = 'hadoop',

http://git-wip-us.apache.org/repos/asf/ambari/blob/1367ceb2/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
index 5405b87..21d29bb 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
@@ -510,6 +510,9 @@
         },
         "pig-log4j": {
             "property1": "value1"
+        },
+        "oozie-log4j": {
+            "property1": "value1"
         }
     },
     "configurationTags": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/1367ceb2/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
index 81da171..758fe0b 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
@@ -637,6 +637,9 @@
         },
         "pig-log4j": {
             "property1": "value1"
+        },
+        "oozie-log4j": {
+            "property1": "value1"
         }
     }, 
     "configurationTags": {


[3/5] AMBARI-4687. Write unit tests for HDFS install script on HDP1 and HDP2 (Eugene Chekanskiy via aonishuk)

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/a5a02039/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
new file mode 100644
index 0000000..254f3a1
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
@@ -0,0 +1,437 @@
+{
+    "roleCommand": "START", 
+    "clusterName": "cl1", 
+    "hostname": "c6401.ambari.apache.org", 
+    "passiveInfo": [], 
+    "hostLevelParams": {
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
+        "ambari_db_rca_password": "mapred", 
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
+        "jce_name": "UnlimitedJCEPolicyJDK7.zip", 
+        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
+        "repo_info": "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0.6\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]", 
+        "package_list": "[{\"type\":\"rpm\",\"name\":\"hadoop-yarn\"},{\"type\":\"rpm\",\"name\":\"hadoop-yarn-nodemanager\"},{\"type\":\"rpm\",\"name\":\"hadoop-mapreduce\"},{\"type\":\"rpm\",\"name\":\"hadoop-yarn-proxyserver\"},{\"type\":\"rpm\",\"name\":\"hadoop-yarn-resourcemanager\"}]", 
+        "stack_version": "2.0.6", 
+        "stack_name": "HDP", 
+        "db_name": "ambari", 
+        "ambari_db_rca_driver": "org.postgresql.Driver", 
+        "jdk_name": "jdk-7u45-linux-x64.tar.gz", 
+        "ambari_db_rca_username": "mapred", 
+        "java_home": "/usr/jdk64/jdk1.7.0_45", 
+        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar"
+    }, 
+    "commandType": "EXECUTION_COMMAND", 
+    "roleParams": {}, 
+    "serviceName": "YARN", 
+    "role": "NODEMANAGER", 
+    "commandParams": {
+        "service_package_folder": "HDP/2.0.6/services/YARN/package", 
+        "script": "scripts/nodemanager.py", 
+        "hooks_folder": "HDP/2.0.6/hooks", 
+        "schema_version": "2.0", 
+        "command_timeout": "600", 
+        "script_type": "PYTHON"
+    }, 
+    "taskId": 93, 
+    "public_hostname": "c6401.ambari.apache.org", 
+    "configurations": {
+        "mapred-site": {
+            "mapreduce.jobhistory.address": "c6402.ambari.apache.org:10020", 
+            "mapreduce.cluster.administrators": " hadoop", 
+            "mapreduce.reduce.input.buffer.percent": "0.0", 
+            "mapreduce.output.fileoutputformat.compress": "false", 
+            "mapreduce.framework.name": "yarn", 
+            "mapreduce.map.speculative": "false", 
+            "mapreduce.reduce.shuffle.merge.percent": "0.66", 
+            "yarn.app.mapreduce.am.resource.mb": "683", 
+            "mapreduce.map.java.opts": "-Xmx273m", 
+            "mapreduce.application.classpath": "$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*", 
+            "mapreduce.job.reduce.slowstart.completedmaps": "0.05", 
+            "mapreduce.output.fileoutputformat.compress.type": "BLOCK", 
+            "mapreduce.reduce.speculative": "false", 
+            "mapreduce.reduce.java.opts": "-Xmx546m", 
+            "mapreduce.am.max-attempts": "2", 
+            "yarn.app.mapreduce.am.admin-command-opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN", 
+            "mapreduce.reduce.log.level": "INFO", 
+            "mapreduce.map.sort.spill.percent": "0.7", 
+            "mapreduce.task.timeout": "300000", 
+            "mapreduce.map.memory.mb": "341", 
+            "mapreduce.task.io.sort.factor": "100", 
+            "mapreduce.jobhistory.intermediate-done-dir": "/mr-history/tmp", 
+            "mapreduce.reduce.memory.mb": "683", 
+            "yarn.app.mapreduce.am.log.level": "INFO", 
+            "mapreduce.map.log.level": "INFO", 
+            "mapreduce.shuffle.port": "13562", 
+            "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &amp;&gt; /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`", 
+            "mapreduce.map.output.compress": "false", 
+            "yarn.app.mapreduce.am.staging-dir": "/user", 
+            "mapreduce.reduce.shuffle.parallelcopies": "30", 
+            "mapreduce.reduce.shuffle.input.buffer.percent": "0.7", 
+            "mapreduce.jobhistory.webapp.address": "c6402.ambari.apache.org:19888", 
+            "mapreduce.jobhistory.done-dir": "/mr-history/done", 
+            "mapreduce.admin.reduce.child.java.opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN", 
+            "mapreduce.task.io.sort.mb": "136", 
+            "yarn.app.mapreduce.am.command-opts": "-Xmx546m", 
+            "mapreduce.admin.map.child.java.opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN"
+        }, 
+        "global": {
+            "security_enabled": "false", 
+            "proxyuser_group": "users", 
+            "zk_user": "zookeeper", 
+            "falcon_user": "falcon", 
+            "syncLimit": "5", 
+            "yarn_user": "yarn", 
+            "zk_log_dir": "/var/log/zookeeper", 
+            "yarn_heapsize": "1024", 
+            "rca_enabled": "false", 
+            "namenode_heapsize": "1024m", 
+            "oozie_user": "oozie", 
+            "hcat_conf_dir": "", 
+            "ganglia_runtime_dir": "/var/run/ganglia/hdp", 
+            "lzo_enabled": "true", 
+            "namenode_opt_maxnewsize": "200m", 
+            "smokeuser": "ambari-qa", 
+            "hive_user": "hive", 
+            "hdfs_log_dir_prefix": "/var/log/hadoop", 
+            "mapred_pid_dir_prefix": "/var/run/hadoop-mapreduce", 
+            "zk_data_dir": "/hadoop/zookeeper", 
+            "yarn_pid_dir_prefix": "/var/run/hadoop-yarn", 
+            "initLimit": "10", 
+            "zk_pid_dir": "/var/run/zookeeper", 
+            "user_group": "hadoop", 
+            "dtnode_heapsize": "1024m", 
+            "gmond_user": "nobody", 
+            "tickTime": "2000", 
+            "storm_user": "storm", 
+            "clientPort": "2181", 
+            "yarn_log_dir_prefix": "/var/log/hadoop-yarn", 
+            "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce", 
+            "nagios_group": "nagios", 
+            "hdfs_user": "hdfs", 
+            "hbase_user": "hbase", 
+            "webhcat_user": "hcat", 
+            "nodemanager_heapsize": "1024", 
+            "gmetad_user": "nobody", 
+            "namenode_opt_newsize": "200m", 
+            "mapred_user": "mapred", 
+            "resourcemanager_heapsize": "1024", 
+            "hcat_user": "hcat", 
+            "hadoop_heapsize": "1024", 
+            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
+            "nagios_user": "nagios"
+        }, 
+        "capacity-scheduler": {
+            "yarn.scheduler.capacity.node-locality-delay": "40", 
+            "yarn.scheduler.capacity.root.capacity": "100", 
+            "yarn.scheduler.capacity.root.acl_administer_queues": "*", 
+            "yarn.scheduler.capacity.root.queues": "default", 
+            "yarn.scheduler.capacity.maximum-applications": "10000", 
+            "yarn.scheduler.capacity.root.default.user-limit-factor": "1", 
+            "yarn.scheduler.capacity.root.unfunded.capacity": "50", 
+            "yarn.scheduler.capacity.root.default.maximum-capacity": "100", 
+            "yarn.scheduler.capacity.root.default.state": "RUNNING", 
+            "yarn.scheduler.capacity.maximum-am-resource-percent": "0.2", 
+            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "*", 
+            "yarn.scheduler.capacity.root.default.capacity": "100", 
+            "yarn.scheduler.capacity.root.default.acl_submit_jobs": "*"
+        }, 
+        "hdfs-site": {
+            "dfs.namenode.avoid.write.stale.datanode": "true", 
+            "dfs.namenode.shared.edits.dir": "qjournal://c6401.ambari.apache.org:8485;c6402.ambari.apache.org:8485;c6403.ambari.apache.org:8485/ns1", 
+            "dfs.namenode.rpc-address.ns1.nn1": "c6401.ambari.apache.org:8020", 
+            "dfs.namenode.http-address.ns1.nn2": "c6402.ambari.apache.org:50070", 
+            "dfs.namenode.http-address.ns1.nn1": "c6401.ambari.apache.org:50070", 
+            "dfs.namenode.checkpoint.txns": "1000000", 
+            "dfs.namenode.rpc-address.ns1.nn2": "c6402.ambari.apache.org:8020", 
+            "dfs.block.access.token.enable": "true", 
+            "dfs.support.append": "true", 
+            "dfs.datanode.address": "0.0.0.0:${ambari.dfs.datanode.port}", 
+            "dfs.cluster.administrators": " hdfs", 
+            "ambari.dfs.datanode.http.port": "50075", 
+            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
+            "dfs.namenode.safemode.threshold-pct": "1.0f", 
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
+            "dfs.permissions.enabled": "true", 
+            "dfs.client.read.shortcircuit": "true", 
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
+            "dfs.ha.automatic-failover.enabled": "true", 
+            "dfs.blockreport.initialDelay": "120", 
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journal", 
+            "dfs.blocksize": "134217728", 
+            "dfs.datanode.max.transfer.threads": "1024", 
+            "dfs.heartbeat.interval": "3", 
+            "dfs.replication": "3", 
+            "dfs.namenode.handler.count": "100", 
+            "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary", 
+            "fs.permissions.umask-mode": "022", 
+            "dfs.namenode.stale.datanode.interval": "30000", 
+            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
+            "dfs.namenode.name.dir": "/hadoop/hdfs/namenode", 
+            "dfs.nameservices": "ns1", 
+            "dfs.datanode.data.dir": "/hadoop/hdfs/data", 
+            "dfs.namenode.https-address.ns1.nn2": "c6402.ambari.apache.org:50470", 
+            "dfs.webhdfs.enabled": "true", 
+            "dfs.namenode.https-address.ns1.nn1": "c6401.ambari.apache.org:50470", 
+            "dfs.client.failover.proxy.provider.ns1": "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider", 
+            "dfs.datanode.failed.volumes.tolerated": "0", 
+            "dfs.namenode.accesstime.precision": "0", 
+            "ambari.dfs.datanode.port": "50010", 
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
+            "dfs.namenode.secondary.http-address": "c6402.ambari.apache.org:50090", 
+            "dfs.ha.fencing.methods": "shell(/bin/true)", 
+            "dfs.datanode.http.address": "0.0.0.0:${ambari.dfs.datanode.http.port}", 
+            "dfs.datanode.du.reserved": "1073741824", 
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
+            "dfs.ha.namenodes.ns1": "nn1,nn2", 
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
+            "dfs.permissions.superusergroup": "hdfs", 
+            "dfs.https.port": "50470", 
+            "dfs.journalnode.http-address": "0.0.0.0:8480", 
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
+            "dfs.namenode.avoid.read.stale.datanode": "true", 
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
+            "dfs.datanode.data.dir.perm": "750", 
+            "dfs.namenode.name.dir.restore": "true", 
+            "dfs.replication.max": "50", 
+            "dfs.namenode.checkpoint.period": "21600"
+        }, 
+        "yarn-log4j": {
+            "log4j.appender.JSA.layout.ConversionPattern": "%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n", 
+            "log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary": "${yarn.server.resourcemanager.appsummary.logger}", 
+            "log4j.appender.RMSUMMARY.File": "/var/log/hadoop-yarn/yarn/${yarn.server.resourcemanager.appsummary.log.file}", 
+            "log4j.appender.RMSUMMARY.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.appender.RMSUMMARY.layout.ConversionPattern": "%d{ISO8601} %p %c{2}: %m%n", 
+            "hadoop.mapreduce.jobsummary.log.file": "hadoop-mapreduce.jobsummary.log", 
+            "log4j.appender.RMSUMMARY.MaxBackupIndex": "20", 
+            "log4j.appender.RMSUMMARY": "org.apache.log4j.RollingFileAppender", 
+            "log4j.appender.JSA": "org.apache.log4j.DailyRollingFileAppender", 
+            "hadoop.mapreduce.jobsummary.logger": "${hadoop.root.logger}", 
+            "yarn.server.resourcemanager.appsummary.log.file": "hadoop-mapreduce.jobsummary.log", 
+            "log4j.appender.JSA.DatePattern": ".yyyy-MM-dd", 
+            "yarn.server.resourcemanager.appsummary.logger": "${hadoop.root.logger}", 
+            "log4j.appender.JSA.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.appender.RMSUMMARY.MaxFileSize": "256MB", 
+            "log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary": "false"
+        }, 
+        "core-site": {
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
+            "gluster.daemon.user": "null", 
+            "fs.trash.interval": "360", 
+            "hadoop.security.authentication": "simple", 
+            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec", 
+            "mapreduce.jobtracker.webinterface.trusted": "false", 
+            "fs.AbstractFileSystem.glusterfs.impl": "null", 
+            "fs.defaultFS": "hdfs://ns1", 
+            "ipc.client.connect.max.retries": "50", 
+            "ipc.client.idlethreshold": "8000", 
+            "io.file.buffer.size": "131072", 
+            "hadoop.security.authorization": "false", 
+            "ha.zookeeper.quorum": "c6401.ambari.apache.org:2181,c6402.ambari.apache.org:2181,c6403.ambari.apache.org:2181", 
+            "ipc.client.connection.maxidletime": "30000", 
+            "hadoop.security.auth_to_local": "\n        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n        RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n        RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n        RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n        DEFAULT"
+        }, 
+        "hdfs-log4j": {
+            "log4j.appender.DRFA.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.appender.DRFA.layout.ConversionPattern": "%d{ISO8601} %p %c: %m%n", 
+            "log4j.appender.DRFAAUDIT.DatePattern": ".yyyy-MM-dd", 
+            "log4j.appender.EventCounter": "org.apache.hadoop.log.metrics.EventCounter", 
+            "log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit": "false", 
+            "log4j.appender.DRFAS.File": "${hadoop.log.dir}/${hadoop.security.log.file}", 
+            "log4j.appender.NullAppender": "org.apache.log4j.varia.NullAppender", 
+            "log4j.appender.MRAUDIT.layout.ConversionPattern": "%d{ISO8601} %p %c{2}: %m%n", 
+            "log4j.additivity.org.apache.hadoop.mapred.AuditLogger": "false", 
+            "log4j.appender.DRFAS": "org.apache.log4j.DailyRollingFileAppender", 
+            "hadoop.tasklog.noKeepSplits": "4", 
+            "log4j.appender.DRFAAUDIT": "org.apache.log4j.DailyRollingFileAppender", 
+            "log4j.appender.DRFAAUDIT.File": "${hadoop.log.dir}/hdfs-audit.log", 
+            "log4j.appender.DRFAS.DatePattern": ".yyyy-MM-dd", 
+            "log4j.appender.MRAUDIT": "org.apache.log4j.DailyRollingFileAppender", 
+            "hadoop.security.log.maxbackupindex": "20", 
+            "log4j.appender.DRFA.DatePattern": ".yyyy-MM-dd", 
+            "log4j.appender.console.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service": "ERROR", 
+            "log4j.appender.RFA.layout": "org.apache.log4j.PatternLayout", 
+            "hadoop.tasklog.taskid": "null", 
+            "log4j.appender.RFAS.layout": "org.apache.log4j.PatternLayout", 
+            "hadoop.root.logger": "INFO,console", 
+            "hadoop.security.logger": "INFO,console", 
+            "log4j.appender.DRFAAUDIT.layout.ConversionPattern": "%d{ISO8601} %p %c{2}: %m%n", 
+            "log4j.appender.RFAS.MaxFileSize": "${hadoop.security.log.maxfilesize}", 
+            "log4j.appender.MRAUDIT.DatePattern": ".yyyy-MM-dd", 
+            "log4j.appender.RFA.File": "${hadoop.log.dir}/${hadoop.log.file}", 
+            "log4j.appender.RFAS.layout.ConversionPattern": "%d{ISO8601} %p %c: %m%n", 
+            "log4j.appender.TLA": "org.apache.hadoop.mapred.TaskLogAppender", 
+            "log4j.logger.org.apache.hadoop.metrics2": "${hadoop.metrics.log.level}", 
+            "log4j.appender.DRFA.File": "${hadoop.log.dir}/${hadoop.log.file}", 
+            "log4j.appender.TLA.layout": "org.apache.log4j.PatternLayout", 
+            "hadoop.log.file": "hadoop.log", 
+            "hadoop.security.log.file": "SecurityAuth.audit", 
+            "log4j.appender.console.target": "System.err", 
+            "log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit": "${hdfs.audit.logger}", 
+            "hdfs.audit.logger": "INFO,console", 
+            "log4j.appender.RFAS.MaxBackupIndex": "${hadoop.security.log.maxbackupindex}", 
+            "log4j.appender.TLA.layout.ConversionPattern": "%d{ISO8601} %p %c: %m%n", 
+            "hadoop.tasklog.iscleanup": "false", 
+            "mapred.audit.logger": "INFO,console", 
+            "log4j.appender.DRFAAUDIT.layout": "org.apache.log4j.PatternLayout", 
+            "hadoop.tasklog.logsRetainHours": "12", 
+            "log4j.appender.MRAUDIT.File": "${hadoop.log.dir}/mapred-audit.log", 
+            "log4j.appender.TLA.totalLogFileSize": "${hadoop.tasklog.totalLogFileSize}", 
+            "log4j.appender.DRFA": "org.apache.log4j.DailyRollingFileAppender", 
+            "log4j.category.SecurityLogger": "${hadoop.security.logger}", 
+            "hadoop.tasklog.totalLogFileSize": "100", 
+            "log4j.appender.RFA.MaxFileSize": "256MB", 
+            "log4j.appender.RFAS": "org.apache.log4j.RollingFileAppender", 
+            "log4j.appender.RFA": "org.apache.log4j.RollingFileAppender", 
+            "log4j.appender.RFA.layout.ConversionPattern": "%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n", 
+            "log4j.appender.DRFAS.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.threshhold": "ALL", 
+            "log4j.appender.TLA.isCleanup": "${hadoop.tasklog.iscleanup}", 
+            "log4j.appender.TLA.taskId": "${hadoop.tasklog.taskid}", 
+            "log4j.appender.console.layout.ConversionPattern": "%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n", 
+            "log4j.appender.MRAUDIT.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.appender.console": "org.apache.log4j.ConsoleAppender", 
+            "hadoop.log.dir": ".", 
+            "hadoop.security.log.maxfilesize": "256MB", 
+            "hadoop.metrics.log.level": "INFO", 
+            "log4j.appender.RFA.MaxBackupIndex": "10", 
+            "log4j.rootLogger": "${hadoop.root.logger}, EventCounter", 
+            "log4j.appender.RFAS.File": "${hadoop.log.dir}/${hadoop.security.log.file}", 
+            "log4j.logger.org.apache.hadoop.mapred.AuditLogger": "${mapred.audit.logger}", 
+            "hadoop.tasklog.purgeLogSplits": "true", 
+            "log4j.appender.DRFAS.layout.ConversionPattern": "%d{ISO8601} %p %c: %m%n"
+        }, 
+        "zookeeper-log4j": {
+            "log4j.appender.CONSOLE.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.appender.TRACEFILE.layout.ConversionPattern": "%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n", 
+            "log4j.appender.CONSOLE.layout.ConversionPattern": "%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n", 
+            "log4j.appender.ROLLINGFILE": "org.apache.log4j.RollingFileAppender", 
+            "log4j.appender.CONSOLE.Threshold": "INFO", 
+            "log4j.appender.CONSOLE": "org.apache.log4j.ConsoleAppender", 
+            "log4j.appender.ROLLINGFILE.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.appender.TRACEFILE.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.appender.TRACEFILE.Threshold": "TRACE", 
+            "log4j.appender.ROLLINGFILE.layout.ConversionPattern": "%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n", 
+            "log4j.appender.TRACEFILE": "org.apache.log4j.FileAppender", 
+            "log4j.appender.TRACEFILE.File": "zookeeper_trace.log", 
+            "log4j.appender.ROLLINGFILE.File": "zookeeper.log", 
+            "log4j.appender.ROLLINGFILE.MaxFileSize": "10MB", 
+            "log4j.appender.ROLLINGFILE.Threshold": "DEBUG"
+        }, 
+        "yarn-site": {
+            "yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25", 
+            "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor", 
+            "yarn.nodemanager.local-dirs": "/hadoop/yarn/local", 
+            "yarn.resourcemanager.resource-tracker.address": "c6402.ambari.apache.org:8025", 
+            "yarn.nodemanager.remote-app-log-dir-suffix": "logs", 
+            "yarn.resourcemanager.hostname": "c6402.ambari.apache.org", 
+            "yarn.nodemanager.health-checker.script.timeout-ms": "60000", 
+            "yarn.resourcemanager.scheduler.class": "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler", 
+            "yarn.nodemanager.resource.memory-mb": "2048", 
+            "yarn.scheduler.minimum-allocation-mb": "683", 
+            "yarn.resourcemanager.address": "c6402.ambari.apache.org:8050", 
+            "yarn.resourcemanager.scheduler.address": "c6402.ambari.apache.org:8030", 
+            "yarn.log-aggregation.retain-seconds": "2592000", 
+            "yarn.scheduler.maximum-allocation-mb": "2048", 
+            "yarn.log-aggregation-enable": "true", 
+            "yarn.nodemanager.address": "0.0.0.0:45454", 
+            "yarn.nodemanager.container-monitor.interval-ms": "3000", 
+            "yarn.nodemanager.log-aggregation.compression-type": "gz", 
+            "yarn.nodemanager.log.retain-second": "604800", 
+            "yarn.nodemanager.delete.debug-delay-sec": "0", 
+            "yarn.nodemanager.log-dirs": "/hadoop/yarn/log", 
+            "yarn.nodemanager.health-checker.interval-ms": "135000", 
+            "yarn.resourcemanager.am.max-attempts": "2", 
+            "yarn.nodemanager.remote-app-log-dir": "/app-logs", 
+            "yarn.nodemanager.admin-env": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX", 
+            "yarn.nodemanager.aux-services": "mapreduce_shuffle", 
+            "yarn.nodemanager.vmem-check-enabled": "false", 
+            "yarn.nodemanager.vmem-pmem-ratio": "2.1", 
+            "yarn.admin.acl": "*", 
+            "yarn.resourcemanager.webapp.address": "c6402.ambari.apache.org:8088", 
+            "yarn.resourcemanager.nodes.exclude-path": "/etc/hadoop/conf/yarn.exclude", 
+            "yarn.nodemanager.linux-container-executor.group": "hadoop", 
+            "yarn.acl.enable": "true", 
+            "yarn.log.server.url": "http://c6402.ambari.apache.org:19888/jobhistory/logs", 
+            "yarn.application.classpath": "/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*", 
+            "yarn.resourcemanager.admin.address": "c6402.ambari.apache.org:8141", 
+            "yarn.nodemanager.aux-services.mapreduce_shuffle.class": "org.apache.hadoop.mapred.ShuffleHandler"
+        }
+    }, 
+    "configurationTags": {
+        "capacity-scheduler": {
+            "tag": "version1"
+        }, 
+        "global": {
+            "tag": "version1"
+        }, 
+        "mapred-site": {
+            "tag": "version1"
+        }, 
+        "hdfs-site": {
+            "tag": "version1392401108182"
+        }, 
+        "yarn-log4j": {
+            "tag": "version1"
+        }, 
+        "core-site": {
+            "tag": "version1392401108196"
+        }, 
+        "hdfs-log4j": {
+            "tag": "version1"
+        }, 
+        "zookeeper-log4j": {
+            "tag": "version1"
+        }, 
+        "yarn-site": {
+            "tag": "version1"
+        }
+    }, 
+    "commandId": "23-4", 
+    "clusterHostInfo": {
+        "nm_hosts": [
+            "c6403.ambari.apache.org", 
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "zkfc_hosts": [
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "all_ping_ports": [
+            "8670", 
+            "8670", 
+            "8670"
+        ], 
+        "journalnode_hosts": [
+            "c6403.ambari.apache.org", 
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "rm_host": [
+            "c6402.ambari.apache.org"
+        ], 
+        "all_hosts": [
+            "c6403.ambari.apache.org", 
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "slave_hosts": [
+            "c6403.ambari.apache.org", 
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "namenode_host": [
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "zookeeper_hosts": [
+            "c6403.ambari.apache.org", 
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "hs_host": [
+            "c6402.ambari.apache.org"
+        ]
+    }
+}
\ No newline at end of file
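

The fixture above describes an HA-enabled cluster: nameservice ns1 with NameNodes nn1 and nn2, quorum journal nodes, and automatic failover turned on. As a quick orientation aid, a minimal sketch for pulling those HA keys out of the new file (the repository-relative path is taken from the diff header; adjust it to wherever you run the snippet from) is:

import json

path = "ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json"
with open(path) as fixture:
    config = json.load(fixture)

hdfs_site = config["configurations"]["hdfs-site"]
print(hdfs_site["dfs.nameservices"])                    # ns1
print(hdfs_site["dfs.ha.namenodes.ns1"])                # nn1,nn2
print(hdfs_site["dfs.ha.automatic-failover.enabled"])   # true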

http://git-wip-us.apache.org/repos/asf/ambari/blob/a5a02039/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
new file mode 100644
index 0000000..7db1bbd
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
@@ -0,0 +1,521 @@
+{
+    "roleCommand": "START", 
+    "clusterName": "cl1", 
+    "hostname": "c6401.ambari.apache.org", 
+    "passiveInfo": [], 
+    "hostLevelParams": {
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
+        "ambari_db_rca_password": "mapred", 
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
+        "jce_name": "UnlimitedJCEPolicyJDK7.zip", 
+        "oracle_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//ojdbc6.jar", 
+        "repo_info": "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0.6\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]", 
+        "package_list": "[{\"type\":\"rpm\",\"name\":\"lzo\"},{\"type\":\"rpm\",\"name\":\"hadoop\"},{\"type\":\"rpm\",\"name\":\"hadoop-libhdfs\"},{\"type\":\"rpm\",\"name\":\"hadoop-lzo\"},{\"type\":\"rpm\",\"name\":\"hadoop-lzo-native\"},{\"type\":\"rpm\",\"name\":\"snappy\"},{\"type\":\"rpm\",\"name\":\"snappy-devel\"},{\"type\":\"rpm\",\"name\":\"ambari-log4j\"}]", 
+        "stack_version": "2.0.6", 
+        "stack_name": "HDP", 
+        "db_name": "ambari", 
+        "ambari_db_rca_driver": "org.postgresql.Driver", 
+        "jdk_name": "jdk-7u45-linux-x64.tar.gz", 
+        "ambari_db_rca_username": "mapred", 
+        "java_home": "/usr/jdk64/jdk1.7.0_45", 
+        "mysql_jdbc_url": "http://c6401.ambari.apache.org:8080/resources//mysql-connector-java.jar"
+    }, 
+    "commandType": "EXECUTION_COMMAND", 
+    "roleParams": {}, 
+    "serviceName": "HDFS", 
+    "role": "ZKFC", 
+    "commandParams": {
+        "service_package_folder": "HDP/2.0.6/services/HDFS/package", 
+        "script": "scripts/zkfc_slave.py", 
+        "hooks_folder": "HDP/2.0.6/hooks", 
+        "schema_version": "2.0", 
+        "command_timeout": "600", 
+        "script_type": "PYTHON"
+    }, 
+    "taskId": 138, 
+    "public_hostname": "c6401.ambari.apache.org", 
+    "configurations": {
+        "mapred-site": {
+            "mapreduce.jobhistory.address": "c6402.ambari.apache.org:10020", 
+            "mapreduce.jobhistory.webapp.spnego-keytab-file": "/etc/security/keytabs/spnego.service.keytab", 
+            "mapreduce.reduce.input.buffer.percent": "0.0", 
+            "mapreduce.output.fileoutputformat.compress": "false", 
+            "mapreduce.framework.name": "yarn", 
+            "mapreduce.map.speculative": "false", 
+            "mapreduce.reduce.shuffle.merge.percent": "0.66", 
+            "yarn.app.mapreduce.am.resource.mb": "683", 
+            "mapreduce.map.java.opts": "-Xmx273m", 
+            "mapreduce.cluster.administrators": " hadoop", 
+            "mapreduce.application.classpath": "$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*", 
+            "mapreduce.job.reduce.slowstart.completedmaps": "0.05", 
+            "mapreduce.output.fileoutputformat.compress.type": "BLOCK", 
+            "mapreduce.reduce.speculative": "false", 
+            "mapreduce.reduce.java.opts": "-Xmx546m", 
+            "mapreduce.am.max-attempts": "2", 
+            "yarn.app.mapreduce.am.admin-command-opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN", 
+            "mapreduce.reduce.log.level": "INFO", 
+            "mapreduce.map.sort.spill.percent": "0.7", 
+            "mapreduce.task.io.sort.mb": "136", 
+            "mapreduce.task.timeout": "300000", 
+            "mapreduce.map.memory.mb": "341", 
+            "mapreduce.task.io.sort.factor": "100", 
+            "mapreduce.jobhistory.intermediate-done-dir": "/mr-history/tmp", 
+            "mapreduce.reduce.memory.mb": "683", 
+            "mapreduce.jobhistory.principal": "jhs/_HOST@EXAMPLE.COM", 
+            "yarn.app.mapreduce.am.log.level": "INFO", 
+            "mapreduce.map.log.level": "INFO", 
+            "mapreduce.shuffle.port": "13562", 
+            "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &amp;&gt; /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`", 
+            "mapreduce.map.output.compress": "false", 
+            "yarn.app.mapreduce.am.staging-dir": "/user", 
+            "mapreduce.reduce.shuffle.parallelcopies": "30", 
+            "mapreduce.reduce.shuffle.input.buffer.percent": "0.7", 
+            "mapreduce.jobhistory.webapp.address": "c6402.ambari.apache.org:19888", 
+            "mapreduce.jobhistory.keytab": "/etc/security/keytabs/jhs.service.keytab", 
+            "mapreduce.jobhistory.done-dir": "/mr-history/done", 
+            "mapreduce.admin.reduce.child.java.opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN", 
+            "mapreduce.jobhistory.webapp.spnego-principal": "HTTP/_HOST@EXAMPLE.COM", 
+            "yarn.app.mapreduce.am.command-opts": "-Xmx546m", 
+            "mapreduce.admin.map.child.java.opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN"
+        }, 
+        "global": {
+            "syncLimit": "5", 
+            "resourcemanager_principal_name": "rm/_HOST", 
+            "hadoop_http_principal_name": "HTTP/_HOST", 
+            "kinit_path_local": "/usr/bin", 
+            "resourcemanager_http_primary_name": "HTTP", 
+            "datanode_primary_name": "dn", 
+            "namenode_principal_name": "nn/_HOST", 
+            "namenode_keytab": "/etc/security/keytabs/nn.service.keytab", 
+            "dfs_datanode_http_address": "1022", 
+            "falcon_user": "falcon", 
+            "hbase_user_keytab": "/etc/security/keytabs/hbase.headless.keytab", 
+            "namenode_opt_maxnewsize": "200m", 
+            "journalnode_keytab": "/etc/security/keytabs/jn.service.keytab", 
+            "snamenode_primary_name": "nn", 
+            "nagios_primary_name": "nagios", 
+            "jobhistory_http_keytab": "/etc/security/keytabs/spnego.service.keytab", 
+            "clientPort": "2181", 
+            "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce", 
+            "jobhistory_keytab": "/etc/security/keytabs/jhs.service.keytab", 
+            "datanode_principal_name": "dn/_HOST", 
+            "namenode_opt_newsize": "200m", 
+            "nagios_group": "nagios", 
+            "hcat_user": "hcat", 
+            "hadoop_heapsize": "1024", 
+            "hbase_regionserver_primary_name": "hbase", 
+            "zk_user": "zookeeper", 
+            "keytab_path": "/etc/security/keytabs", 
+            "nodemanager_primary_name": "nm", 
+            "zk_data_dir": "/hadoop/zookeeper", 
+            "zookeeper_keytab_path": "/etc/security/keytabs/zk.service.keytab", 
+            "namenode_heapsize": "1024m", 
+            "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab", 
+            "kerberos_domain": "EXAMPLE.COM", 
+            "yarn_nodemanager_container-executor_class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor", 
+            "mapred_pid_dir_prefix": "/var/run/hadoop-mapreduce", 
+            "ganglia_runtime_dir": "/var/run/ganglia/hdp", 
+            "lzo_enabled": "true", 
+            "dtnode_heapsize": "1024m", 
+            "dfs_datanode_address": "1019", 
+            "yarn_pid_dir_prefix": "/var/run/hadoop-yarn", 
+            "initLimit": "10", 
+            "zk_pid_dir": "/var/run/zookeeper", 
+            "namenode_primary_name": "nn", 
+            "tickTime": "2000", 
+            "storm_user": "storm", 
+            "datanode_keytab": "/etc/security/keytabs/dn.service.keytab", 
+            "yarn_log_dir_prefix": "/var/log/hadoop-yarn", 
+            "journalnode_primary_name": "jn", 
+            "hbase_user": "hbase", 
+            "gmetad_user": "nobody", 
+            "nodemanager_http_primary_name": "HTTP", 
+            "smokeuser": "ambari-qa", 
+            "nodemanager_keytab": "/etc/security/keytabs/nm.service.keytab", 
+            "nagios_user": "nagios", 
+            "security_enabled": "true", 
+            "proxyuser_group": "users", 
+            "hbase_primary_name": "hbase", 
+            "oozie_http_primary_name": "HTTP", 
+            "yarn_heapsize": "1024", 
+            "zookeeper_principal_name": "zookeeper/_HOST@EXAMPLE.COM", 
+            "nodemanager_http_keytab": "/etc/security/keytabs/spnego.service.keytab", 
+            "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab", 
+            "oozie_user": "oozie", 
+            "hdfs_log_dir_prefix": "/var/log/hadoop", 
+            "zookeeper_primary_name": "zookeeper", 
+            "yarn_user": "yarn", 
+            "gmond_user": "nobody", 
+            "hive_metastore_primary_name": "hive", 
+            "jobhistory_primary_name": "jhs", 
+            "hdfs_user": "hdfs", 
+            "webhcat_user": "hcat", 
+            "nodemanager_heapsize": "1024", 
+            "resourcemanager_http_keytab": "/etc/security/keytabs/spnego.service.keytab", 
+            "zk_log_dir": "/var/log/zookeeper", 
+            "snamenode_keytab": "/etc/security/keytabs/nn.service.keytab", 
+            "smokeuser_principal_name": "ambari-qa", 
+            "mapred_user": "mapred", 
+            "jobhistory_http_primary_name": "HTTP", 
+            "smokeuser_primary_name": "ambari-qa", 
+            "hadoop_http_keytab": "/etc/security/keytabs/spnego.service.keytab", 
+            "hbase_master_primary_name": "hbase", 
+            "hdfs_primary_name": "hdfs", 
+            "jobhistory_principal_name": "jhs/_HOST", 
+            "webHCat_http_primary_name": "HTTP", 
+            "rca_enabled": "false", 
+            "hcat_conf_dir": "", 
+            "resourcemanager_primary_name": "rm", 
+            "hadoop_http_primary_name": "HTTP", 
+            "jobhistory_http_principal_name": "HTTP/_HOST", 
+            "resourcemanager_keytab": "/etc/security/keytabs/rm.service.keytab", 
+            "snamenode_principal_name": "nn/_HOST", 
+            "nodemanager_principal_name": "nm/_HOST", 
+            "user_group": "hadoop", 
+            "nodemanager_http_principal_name": "HTTP/_HOST", 
+            "hive_user": "hive", 
+            "resourcemanager_http_principal_name": "HTTP/_HOST", 
+            "oozie_primary_name": "oozie", 
+            "kerberos_install_type": "MANUALLY_SET_KERBEROS", 
+            "journalnode_principal_name": "jn/_HOST", 
+            "resourcemanager_heapsize": "1024", 
+            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
+            "hbase_principal_name": "hbase", 
+            "hdfs_principal_name": "hdfs"
+        }, 
+        "capacity-scheduler": {
+            "yarn.scheduler.capacity.node-locality-delay": "40", 
+            "yarn.scheduler.capacity.root.capacity": "100", 
+            "yarn.scheduler.capacity.root.acl_administer_queues": "*", 
+            "yarn.scheduler.capacity.root.queues": "default", 
+            "yarn.scheduler.capacity.maximum-applications": "10000", 
+            "yarn.scheduler.capacity.root.default.user-limit-factor": "1", 
+            "yarn.scheduler.capacity.root.unfunded.capacity": "50", 
+            "yarn.scheduler.capacity.root.default.maximum-capacity": "100", 
+            "yarn.scheduler.capacity.root.default.state": "RUNNING", 
+            "yarn.scheduler.capacity.maximum-am-resource-percent": "0.2", 
+            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "*", 
+            "yarn.scheduler.capacity.root.default.capacity": "100", 
+            "yarn.scheduler.capacity.root.default.acl_submit_jobs": "*"
+        }, 
+        "hdfs-site": {
+            "dfs.namenode.avoid.write.stale.datanode": "true", 
+            "dfs.namenode.shared.edits.dir": "qjournal://c6401.ambari.apache.org:8485;c6402.ambari.apache.org:8485;c6403.ambari.apache.org:8485/ns1", 
+            "dfs.namenode.rpc-address.ns1.nn1": "c6401.ambari.apache.org:8020", 
+            "dfs.namenode.http-address.ns1.nn2": "c6402.ambari.apache.org:50070", 
+            "dfs.namenode.avoid.read.stale.datanode": "true", 
+            "dfs.namenode.http-address.ns1.nn1": "c6401.ambari.apache.org:50070", 
+            "dfs.namenode.checkpoint.txns": "1000000", 
+            "dfs.namenode.kerberos.internal.spnego.principal": "${dfs.web.authentication.kerberos.principal}", 
+            "dfs.block.access.token.enable": "true", 
+            "dfs.support.append": "true", 
+            "dfs.datanode.address": "0.0.0.0:${ambari.dfs.datanode.port}", 
+            "dfs.cluster.administrators": " hdfs", 
+            "ambari.dfs.datanode.http.port": "1022", 
+            "dfs.journalnode.kerberos.principal": "jn/_HOST@EXAMPLE.COM", 
+            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
+            "dfs.namenode.safemode.threshold-pct": "1.0f", 
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
+            "dfs.permissions.enabled": "true", 
+            "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
+            "dfs.client.read.shortcircuit": "true", 
+            "dfs.https.port": "50470", 
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
+            "dfs.ha.automatic-failover.enabled": "true", 
+            "dfs.blockreport.initialDelay": "120", 
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journal", 
+            "dfs.blocksize": "134217728", 
+            "dfs.datanode.max.transfer.threads": "1024", 
+            "dfs.heartbeat.interval": "3", 
+            "dfs.replication": "3", 
+            "dfs.namenode.handler.count": "100", 
+            "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
+            "fs.permissions.umask-mode": "022", 
+            "dfs.namenode.stale.datanode.interval": "30000", 
+            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
+            "dfs.namenode.name.dir": "/hadoop/hdfs/namenode", 
+            "dfs.secondary.namenode.kerberos.internal.spnego.principal": "${dfs.web.authentication.kerberos.principal}", 
+            "dfs.nameservices": "ns1", 
+            "dfs.datanode.data.dir": "/hadoop/hdfs/data", 
+            "dfs.namenode.https-address.ns1.nn2": "c6402.ambari.apache.org:50470", 
+            "dfs.webhdfs.enabled": "true", 
+            "dfs.namenode.https-address.ns1.nn1": "c6401.ambari.apache.org:50470", 
+            "dfs.client.failover.proxy.provider.ns1": "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider", 
+            "dfs.datanode.failed.volumes.tolerated": "0", 
+            "dfs.namenode.accesstime.precision": "0", 
+            "ambari.dfs.datanode.port": "1019", 
+            "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary", 
+            "dfs.namenode.secondary.http-address": "c6402.ambari.apache.org:50090", 
+            "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM", 
+            "dfs.ha.fencing.methods": "shell(/bin/true)", 
+            "dfs.journalnode.keytab.file": "/etc/security/keytabs/jn.service.keytab", 
+            "dfs.datanode.http.address": "0.0.0.0:${ambari.dfs.datanode.http.port}", 
+            "dfs.datanode.du.reserved": "1073741824", 
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
+            "dfs.namenode.rpc-address.ns1.nn2": "c6402.ambari.apache.org:8020", 
+            "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
+            "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
+            "dfs.ha.namenodes.ns1": "nn1,nn2", 
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
+            "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
+            "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab", 
+            "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
+            "dfs.permissions.superusergroup": "hdfs", 
+            "dfs.journalnode.http-address": "0.0.0.0:8480", 
+            "dfs.journalnode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", 
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
+            "dfs.datanode.data.dir.perm": "750", 
+            "dfs.namenode.name.dir.restore": "true", 
+            "dfs.replication.max": "50", 
+            "dfs.namenode.checkpoint.period": "21600"
+        }, 
+        "yarn-log4j": {
+            "log4j.appender.JSA.layout.ConversionPattern": "%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n", 
+            "log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary": "${yarn.server.resourcemanager.appsummary.logger}", 
+            "log4j.appender.RMSUMMARY.File": "/var/log/hadoop-yarn/yarn/${yarn.server.resourcemanager.appsummary.log.file}", 
+            "log4j.appender.RMSUMMARY.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.appender.RMSUMMARY.layout.ConversionPattern": "%d{ISO8601} %p %c{2}: %m%n", 
+            "hadoop.mapreduce.jobsummary.log.file": "hadoop-mapreduce.jobsummary.log", 
+            "log4j.appender.RMSUMMARY.MaxBackupIndex": "20", 
+            "log4j.appender.RMSUMMARY": "org.apache.log4j.RollingFileAppender", 
+            "log4j.appender.JSA": "org.apache.log4j.DailyRollingFileAppender", 
+            "hadoop.mapreduce.jobsummary.logger": "${hadoop.root.logger}", 
+            "yarn.server.resourcemanager.appsummary.log.file": "hadoop-mapreduce.jobsummary.log", 
+            "log4j.appender.JSA.DatePattern": ".yyyy-MM-dd", 
+            "yarn.server.resourcemanager.appsummary.logger": "${hadoop.root.logger}", 
+            "log4j.appender.JSA.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.appender.RMSUMMARY.MaxFileSize": "256MB", 
+            "log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary": "false"
+        }, 
+        "core-site": {
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
+            "gluster.daemon.user": "null", 
+            "fs.trash.interval": "360", 
+            "hadoop.security.authentication": "kerberos", 
+            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec", 
+            "mapreduce.jobtracker.webinterface.trusted": "false", 
+            "fs.AbstractFileSystem.glusterfs.impl": "null", 
+            "fs.defaultFS": "hdfs://ns1", 
+            "ipc.client.connect.max.retries": "50", 
+            "ipc.client.idlethreshold": "8000", 
+            "io.file.buffer.size": "131072", 
+            "hadoop.security.authorization": "true", 
+            "ha.zookeeper.quorum": "c6401.ambari.apache.org:2181,c6402.ambari.apache.org:2181,c6403.ambari.apache.org:2181", 
+            "ipc.client.connection.maxidletime": "30000", 
+            "hadoop.security.auth_to_local": "RULE:[2:$1@$0](rm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](dn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](jhs@.*EXAMPLE.COM)s/.*/mapred/\nRULE:[2:$1@$0](jn/_HOST@.*EXAMPLE.COM)s/.*/hdfs/\nDEFAULT"
+        }, 
+        "hdfs-log4j": {
+            "log4j.appender.DRFA.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.appender.DRFA.layout.ConversionPattern": "%d{ISO8601} %p %c: %m%n", 
+            "log4j.appender.DRFAAUDIT.DatePattern": ".yyyy-MM-dd", 
+            "log4j.appender.EventCounter": "org.apache.hadoop.log.metrics.EventCounter", 
+            "log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit": "false", 
+            "log4j.appender.DRFAS.File": "${hadoop.log.dir}/${hadoop.security.log.file}", 
+            "log4j.appender.NullAppender": "org.apache.log4j.varia.NullAppender", 
+            "log4j.appender.MRAUDIT.layout.ConversionPattern": "%d{ISO8601} %p %c{2}: %m%n", 
+            "log4j.additivity.org.apache.hadoop.mapred.AuditLogger": "false", 
+            "log4j.appender.DRFAS": "org.apache.log4j.DailyRollingFileAppender", 
+            "hadoop.tasklog.noKeepSplits": "4", 
+            "log4j.appender.DRFAAUDIT": "org.apache.log4j.DailyRollingFileAppender", 
+            "log4j.appender.DRFAAUDIT.File": "${hadoop.log.dir}/hdfs-audit.log", 
+            "log4j.appender.DRFAS.DatePattern": ".yyyy-MM-dd", 
+            "log4j.appender.MRAUDIT": "org.apache.log4j.DailyRollingFileAppender", 
+            "hadoop.security.log.maxbackupindex": "20", 
+            "log4j.appender.DRFA.DatePattern": ".yyyy-MM-dd", 
+            "log4j.appender.console.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service": "ERROR", 
+            "log4j.appender.RFA.layout": "org.apache.log4j.PatternLayout", 
+            "hadoop.tasklog.taskid": "null", 
+            "log4j.appender.RFAS.layout": "org.apache.log4j.PatternLayout", 
+            "hadoop.root.logger": "INFO,console", 
+            "hadoop.security.logger": "INFO,console", 
+            "log4j.appender.DRFAAUDIT.layout.ConversionPattern": "%d{ISO8601} %p %c{2}: %m%n", 
+            "log4j.appender.RFAS.MaxFileSize": "${hadoop.security.log.maxfilesize}", 
+            "log4j.appender.MRAUDIT.DatePattern": ".yyyy-MM-dd", 
+            "log4j.appender.RFA.File": "${hadoop.log.dir}/${hadoop.log.file}", 
+            "log4j.appender.RFAS.layout.ConversionPattern": "%d{ISO8601} %p %c: %m%n", 
+            "log4j.appender.TLA": "org.apache.hadoop.mapred.TaskLogAppender", 
+            "log4j.logger.org.apache.hadoop.metrics2": "${hadoop.metrics.log.level}", 
+            "log4j.appender.DRFA.File": "${hadoop.log.dir}/${hadoop.log.file}", 
+            "log4j.appender.TLA.layout": "org.apache.log4j.PatternLayout", 
+            "hadoop.log.file": "hadoop.log", 
+            "hadoop.security.log.file": "SecurityAuth.audit", 
+            "log4j.appender.console.target": "System.err", 
+            "log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit": "${hdfs.audit.logger}", 
+            "hdfs.audit.logger": "INFO,console", 
+            "log4j.appender.RFAS.MaxBackupIndex": "${hadoop.security.log.maxbackupindex}", 
+            "log4j.appender.TLA.layout.ConversionPattern": "%d{ISO8601} %p %c: %m%n", 
+            "hadoop.tasklog.iscleanup": "false", 
+            "mapred.audit.logger": "INFO,console", 
+            "log4j.appender.DRFAAUDIT.layout": "org.apache.log4j.PatternLayout", 
+            "hadoop.tasklog.logsRetainHours": "12", 
+            "log4j.appender.MRAUDIT.File": "${hadoop.log.dir}/mapred-audit.log", 
+            "log4j.appender.TLA.totalLogFileSize": "${hadoop.tasklog.totalLogFileSize}", 
+            "log4j.appender.DRFA": "org.apache.log4j.DailyRollingFileAppender", 
+            "log4j.category.SecurityLogger": "${hadoop.security.logger}", 
+            "hadoop.tasklog.totalLogFileSize": "100", 
+            "log4j.appender.RFA.MaxFileSize": "256MB", 
+            "log4j.appender.RFAS": "org.apache.log4j.RollingFileAppender", 
+            "log4j.appender.RFA": "org.apache.log4j.RollingFileAppender", 
+            "log4j.appender.RFA.layout.ConversionPattern": "%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n", 
+            "log4j.appender.DRFAS.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.threshhold": "ALL", 
+            "log4j.appender.TLA.isCleanup": "${hadoop.tasklog.iscleanup}", 
+            "log4j.appender.TLA.taskId": "${hadoop.tasklog.taskid}", 
+            "log4j.appender.console.layout.ConversionPattern": "%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n", 
+            "log4j.appender.MRAUDIT.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.appender.console": "org.apache.log4j.ConsoleAppender", 
+            "hadoop.log.dir": ".", 
+            "hadoop.security.log.maxfilesize": "256MB", 
+            "hadoop.metrics.log.level": "INFO", 
+            "log4j.appender.RFA.MaxBackupIndex": "10", 
+            "log4j.rootLogger": "${hadoop.root.logger}, EventCounter", 
+            "log4j.appender.RFAS.File": "${hadoop.log.dir}/${hadoop.security.log.file}", 
+            "log4j.logger.org.apache.hadoop.mapred.AuditLogger": "${mapred.audit.logger}", 
+            "hadoop.tasklog.purgeLogSplits": "true", 
+            "log4j.appender.DRFAS.layout.ConversionPattern": "%d{ISO8601} %p %c: %m%n"
+        }, 
+        "zookeeper-log4j": {
+            "log4j.appender.CONSOLE.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.appender.TRACEFILE.layout.ConversionPattern": "%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n", 
+            "log4j.appender.CONSOLE.layout.ConversionPattern": "%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n", 
+            "log4j.appender.ROLLINGFILE": "org.apache.log4j.RollingFileAppender", 
+            "log4j.appender.CONSOLE.Threshold": "INFO", 
+            "log4j.appender.CONSOLE": "org.apache.log4j.ConsoleAppender", 
+            "log4j.appender.ROLLINGFILE.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.appender.TRACEFILE.layout": "org.apache.log4j.PatternLayout", 
+            "log4j.appender.TRACEFILE.Threshold": "TRACE", 
+            "log4j.appender.ROLLINGFILE.layout.ConversionPattern": "%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n", 
+            "log4j.appender.TRACEFILE": "org.apache.log4j.FileAppender", 
+            "log4j.appender.TRACEFILE.File": "zookeeper_trace.log", 
+            "log4j.appender.ROLLINGFILE.File": "zookeeper.log", 
+            "log4j.appender.ROLLINGFILE.MaxFileSize": "10MB", 
+            "log4j.appender.ROLLINGFILE.Threshold": "DEBUG"
+        }, 
+        "yarn-site": {
+            "yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25", 
+            "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor", 
+            "yarn.resourcemanager.principal": "rm/_HOST@EXAMPLE.COM", 
+            "yarn.nodemanager.webapp.spnego-keytab-file": "/etc/security/keytabs/spnego.service.keytab", 
+            "yarn.resourcemanager.resource-tracker.address": "c6402.ambari.apache.org:8025", 
+            "yarn.nodemanager.remote-app-log-dir-suffix": "logs", 
+            "yarn.resourcemanager.hostname": "c6402.ambari.apache.org", 
+            "yarn.nodemanager.health-checker.script.timeout-ms": "60000", 
+            "yarn.resourcemanager.scheduler.class": "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler", 
+            "yarn.resourcemanager.nodes.exclude-path": "/etc/hadoop/conf/yarn.exclude", 
+            "yarn.scheduler.minimum-allocation-mb": "683", 
+            "yarn.resourcemanager.address": "c6402.ambari.apache.org:8050", 
+            "yarn.resourcemanager.scheduler.address": "c6402.ambari.apache.org:8030", 
+            "yarn.log-aggregation.retain-seconds": "2592000", 
+            "yarn.scheduler.maximum-allocation-mb": "2048", 
+            "yarn.log-aggregation-enable": "true", 
+            "yarn.nodemanager.address": "0.0.0.0:45454", 
+            "yarn.nodemanager.container-monitor.interval-ms": "3000", 
+            "yarn.nodemanager.principal": "nm/_HOST@EXAMPLE.COM", 
+            "yarn.nodemanager.local-dirs": "/hadoop/yarn/local", 
+            "yarn.nodemanager.log-aggregation.compression-type": "gz", 
+            "yarn.nodemanager.log.retain-second": "604800", 
+            "yarn.nodemanager.delete.debug-delay-sec": "0", 
+            "yarn.nodemanager.keytab": "/etc/security/keytabs/nm.service.keytab", 
+            "yarn.nodemanager.log-dirs": "/hadoop/yarn/log", 
+            "yarn.nodemanager.health-checker.interval-ms": "135000", 
+            "yarn.resourcemanager.webapp.spnego-principal": "HTTP/_HOST@EXAMPLE.COM", 
+            "yarn.resourcemanager.am.max-attempts": "2", 
+            "yarn.nodemanager.remote-app-log-dir": "/app-logs", 
+            "yarn.nodemanager.admin-env": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX", 
+            "yarn.nodemanager.aux-services": "mapreduce_shuffle", 
+            "yarn.nodemanager.vmem-check-enabled": "false", 
+            "yarn.nodemanager.vmem-pmem-ratio": "2.1", 
+            "yarn.admin.acl": "*", 
+            "yarn.resourcemanager.webapp.address": "c6402.ambari.apache.org:8088", 
+            "yarn.nodemanager.resource.memory-mb": "2048", 
+            "yarn.nodemanager.linux-container-executor.group": "hadoop", 
+            "yarn.acl.enable": "true", 
+            "yarn.log.server.url": "http://c6402.ambari.apache.org:19888/jobhistory/logs", 
+            "yarn.nodemanager.webapp.spnego-principal": "HTTP/_HOST@EXAMPLE.COM", 
+            "yarn.application.classpath": "/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*", 
+            "yarn.resourcemanager.admin.address": "c6402.ambari.apache.org:8141", 
+            "yarn.resourcemanager.webapp.spnego-keytab-file": "/etc/security/keytabs/spnego.service.keytab", 
+            "yarn.resourcemanager.keytab": "/etc/security/keytabs/rm.service.keytab", 
+            "yarn.nodemanager.aux-services.mapreduce_shuffle.class": "org.apache.hadoop.mapred.ShuffleHandler"
+        }
+    }, 
+    "configurationTags": {
+        "capacity-scheduler": {
+            "tag": "version1"
+        }, 
+        "global": {
+            "tag": "version1392403922876"
+        }, 
+        "mapred-site": {
+            "tag": "version1392403922877"
+        }, 
+        "hdfs-site": {
+            "tag": "version1392403922876"
+        }, 
+        "yarn-log4j": {
+            "tag": "version1"
+        }, 
+        "core-site": {
+            "tag": "version1392403922876"
+        }, 
+        "hdfs-log4j": {
+            "tag": "version1"
+        }, 
+        "zookeeper-log4j": {
+            "tag": "version1"
+        }, 
+        "yarn-site": {
+            "tag": "version1392403922877"
+        }
+    }, 
+    "commandId": "25-4", 
+    "clusterHostInfo": {
+        "nm_hosts": [
+            "c6403.ambari.apache.org", 
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "zkfc_hosts": [
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "all_ping_ports": [
+            "8670", 
+            "8670", 
+            "8670"
+        ], 
+        "journalnode_hosts": [
+            "c6403.ambari.apache.org", 
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "rm_host": [
+            "c6402.ambari.apache.org"
+        ], 
+        "all_hosts": [
+            "c6403.ambari.apache.org", 
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "slave_hosts": [
+            "c6403.ambari.apache.org", 
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "namenode_host": [
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "zookeeper_hosts": [
+            "c6403.ambari.apache.org", 
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "hs_host": [
+            "c6402.ambari.apache.org"
+        ]
+    }
+}
\ No newline at end of file


[5/5] git commit: AMBARI-4655. unittest ExecuteHadoop (Ivan Kozlov via aonishuk)

Posted by ao...@apache.org.
AMBARI-4655. unittest ExecuteHadoop (Ivan Kozlov via aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/88c2472e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/88c2472e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/88c2472e

Branch: refs/heads/trunk
Commit: 88c2472e50ab3a8fc56b542d57e29d66172c5d03
Parents: 9e69e1e
Author: Andrew Onischuk <ao...@hortonworks.com>
Authored: Mon Feb 17 09:14:49 2014 -0800
Committer: Andrew Onischuk <ao...@hortonworks.com>
Committed: Mon Feb 17 09:18:38 2014 -0800

----------------------------------------------------------------------
 .../TestExecuteHadoopResource.py                | 207 +++++++++++++++++++
 1 file changed, 207 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/88c2472e/ambari-agent/src/test/python/resource_management/TestExecuteHadoopResource.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/resource_management/TestExecuteHadoopResource.py b/ambari-agent/src/test/python/resource_management/TestExecuteHadoopResource.py
new file mode 100644
index 0000000..ff4fd51
--- /dev/null
+++ b/ambari-agent/src/test/python/resource_management/TestExecuteHadoopResource.py
@@ -0,0 +1,207 @@
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+from unittest import TestCase
+from mock.mock import patch
+from resource_management import *
+from resource_management.libraries.resources.execute_hadoop\
+  import ExecuteHadoop
+
+
+class TestExecuteHadoopResource(TestCase):
+  @patch("resource_management.core.providers.system.ExecuteProvider")
+  def test_run_default_args(self, execute_mock):
+    '''
+    Test if default arguments are passed to Execute
+    '''
+    with Environment() as env:
+      ExecuteHadoop("command",
+                    conf_dir="conf_dir",
+                    user="user",
+                    logoutput=True,
+      )
+      self.assertEqual(execute_mock.call_count, 1)
+      self.assertEqual(execute_mock.call_args[0][0].command,'hadoop --config conf_dir command')
+      self.assertEqual(execute_mock.call_args[0][0].arguments,
+                       {'logoutput': True, 'tries': 1, 'user': 'user', 'try_sleep': 0})
+
+
+  @patch("resource_management.core.providers.system.ExecuteProvider")
+  def test_run_unknown_conf(self, execute_mock):
+    '''
+    Test when UnknownConfiguration passed
+    '''
+    with Environment() as env:
+      ExecuteHadoop("command",
+                    kinit_path_local=UnknownConfiguration(name="kinit_path_local"),
+                    conf_dir="conf_dir",
+                    user="user",
+                    keytab=UnknownConfiguration(name="keytab"),
+                    security_enabled=False,
+                    principal=UnknownConfiguration(name="principal")
+                    )
+      self.assertEqual(execute_mock.call_count, 1)
+      self.assertEqual(execute_mock.call_args[0][0].command,'hadoop --config conf_dir command')
+      self.assertEqual(execute_mock.call_args[0][0].arguments,
+                       {'logoutput': False, 'tries': 1, 'user': 'user', 'try_sleep': 0})
+
+
+  @patch("resource_management.core.providers.system.ExecuteProvider")
+  def test_run_defined_args(self, execute_mock):
+    '''
+    Test if defined arguments are passed to Execute
+    '''
+    with Environment("/") as env:
+      ExecuteHadoop("command",
+                    action="run",
+                    kinit_path_local="path",
+                    conf_dir="conf_dir",
+                    user="user",
+                    tries=2,
+                    keytab="keytab",
+                    security_enabled=False,
+                    kinit_override=False,
+                    try_sleep=2,
+                    logoutput=True,
+                    principal="principal"
+      )
+      self.assertEqual(execute_mock.call_count, 1)
+      self.assertEqual(execute_mock.call_args[0][0].command,'hadoop --config conf_dir command')
+      self.assertEqual(execute_mock.call_args[0][0].arguments,
+                       {'logoutput': True, 'tries': 2, 'user': 'user', 'try_sleep': 2})
+
+
+  @patch("resource_management.core.providers.system.ExecuteProvider")
+  def test_run_command_list(self, execute_mock):
+    '''
+    Test for "command" passed as List
+    '''
+    with Environment("/") as env:
+      ExecuteHadoop(["command1","command2"],
+                    action="run",
+                    kinit_path_local="path",
+                    conf_dir="conf_dir",
+                    user="user",
+                    keytab="keytab"
+      )
+      self.assertEqual(execute_mock.call_count, 2)
+      self.assertEqual(execute_mock.call_args_list[0][0][0].command,
+                       'hadoop --config conf_dir command1')
+      self.assertEqual(execute_mock.call_args_list[1][0][0].command,
+                       'hadoop --config conf_dir command2')
+      self.assertEqual(execute_mock.call_args_list[0][0][0].arguments,
+                       {'logoutput': False, 'tries': 1, 'user': 'user', 'try_sleep': 0})
+      self.assertEqual(execute_mock.call_args_list[1][0][0].arguments,
+                       {'logoutput': False, 'tries': 1, 'user': 'user', 'try_sleep': 0})
+
+
+  @patch("resource_management.core.providers.system.ExecuteProvider")
+  def test_run_command_tuple(self, execute_mock):
+    '''
+    Test for "command" passed as Tuple
+    '''
+    with Environment("/") as env:
+      ExecuteHadoop(("command1","command2","command3"),
+                    action="run",
+                    kinit_path_local="path",
+                    conf_dir="conf_dir",
+                    user="user",
+                    keytab="keytab"
+      )
+      self.assertEqual(execute_mock.call_count, 1)
+      self.assertEqual(execute_mock.call_args[0][0].command,
+                       'hadoop --config conf_dir command1 command2 command3')
+
+
+  @patch("resource_management.core.providers.system.ExecuteProvider")
+  def test_run_secured(self, execute_mock):
+    '''
+    Test security_enabled=True behaviour
+    '''
+    with Environment("/") as env:
+      ExecuteHadoop("command",
+                    action="run",
+                    kinit_path_local="path",
+                    conf_dir="conf_dir",
+                    user="user",
+                    tries=1,
+                    keytab="keytab",
+                    security_enabled=True,
+                    kinit_override=False,
+                    try_sleep=0,
+                    logoutput=True
+      )
+      self.assertEqual(execute_mock.call_count, 2)
+      self.assertEqual(str(execute_mock.call_args_list[0][0][0]),
+                       "Execute['path -kt keytab user']")
+      self.assertEqual(execute_mock.call_args_list[0][0][0].command,
+                       'path -kt keytab user')
+      self.assertEqual(execute_mock.call_args_list[0][0][0].arguments,
+                       {'path': ['/bin'], 'user': 'user'})
+      self.assertEqual(execute_mock.call_args_list[1][0][0].command,
+                       'hadoop --config conf_dir command')
+      self.assertEqual(execute_mock.call_args_list[1][0][0].arguments,
+                       {'logoutput': True, 'tries': 1, 'user': 'user', 'try_sleep': 0})
+
+
+  @patch("resource_management.core.providers.system.ExecuteProvider")
+  def test_run_secured_kinit_override(self, execute_mock):
+    '''
+    Test security_enabled=True and kinit_override=True behaviour
+    '''
+    with Environment("/") as env:
+      ExecuteHadoop("command",
+                    action="run",
+                    kinit_path_local="path",
+                    conf_dir="conf_dir",
+                    user="user",
+                    tries=1,
+                    keytab="keytab",
+                    security_enabled=True,
+                    kinit_override=True,
+                    try_sleep=0,
+                    logoutput=True
+      )
+      self.assertEqual(execute_mock.call_count, 1)
+      self.assertEqual(execute_mock.call_args_list[0][0][0].command,
+                       'hadoop --config conf_dir command')
+
+
+  @patch("resource_management.core.providers.system.ExecuteProvider")
+  def test_run_secured_principal(self, execute_mock):
+    '''
+    Test with "principal" argument
+    '''
+    with Environment("/") as env:
+      ExecuteHadoop("command",
+                    action="run",
+                    kinit_path_local="path",
+                    conf_dir="conf_dir",
+                    user="user",
+                    tries=1,
+                    keytab="keytab",
+                    security_enabled=True,
+                    kinit_override=False,
+                    try_sleep=0,
+                    logoutput=True,
+                    principal="principal")
+      self.assertEqual(execute_mock.call_count, 2)
+      self.assertEqual(execute_mock.call_args_list[0][0][0].command,
+                       'path -kt keytab principal')
+      self.assertEqual(execute_mock.call_args_list[1][0][0].command,
+                       'hadoop --config conf_dir command')
\ No newline at end of file
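
A minimal usage sketch of the resource these tests exercise, assuming the secured-mode behaviour the assertions above describe (a kinit Execute first, then the hadoop command). The command string below is illustrative only and not taken from the patch; the keytab and kinit paths mirror values used elsewhere in this patch set.

  from resource_management import *
  from resource_management.libraries.resources.execute_hadoop import ExecuteHadoop

  with Environment("/") as env:
    # Hypothetical invocation; argument names match those the tests pass.
    ExecuteHadoop("fs -mkdir /tmp/example",
                  conf_dir="/etc/hadoop/conf",
                  user="hdfs",
                  security_enabled=True,
                  kinit_path_local="/usr/bin/kinit",
                  keytab="/etc/security/keytabs/hdfs.headless.keytab",
                  principal="hdfs",
                  tries=3,
                  try_sleep=5,
                  logoutput=True)
  # Per the assertions above, this should resolve to two Execute resources:
  #   /usr/bin/kinit -kt /etc/security/keytabs/hdfs.headless.keytab hdfs
  #   hadoop --config /etc/hadoop/conf fs -mkdir /tmp/example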


[4/5] git commit: AMBARI-4687. Write unittests for HDFS install scripts on HDP1 and HDP2 (Eugene Chekanskiy via aonishuk)

Posted by ao...@apache.org.
AMBARI-4687. Write unittests for HDFS install scripts on HDP1 and HDP2
(Eugene Chekanskiy via aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a5a02039
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a5a02039
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a5a02039

Branch: refs/heads/trunk
Commit: a5a0203967b1076d579c8237f7e343b5794b715e
Parents: 33bf1d4
Author: Andrew Onischuk <ao...@hortonworks.com>
Authored: Mon Feb 17 08:37:54 2014 -0800
Committer: Andrew Onischuk <ao...@hortonworks.com>
Committed: Mon Feb 17 09:18:37 2014 -0800

----------------------------------------------------------------------
 .../services/HDFS/package/scripts/datanode.py   |   4 +-
 .../services/HDFS/package/scripts/namenode.py   |   4 +-
 .../services/HDFS/package/scripts/namenode.py   |   8 +-
 .../services/HDFS/package/scripts/snamenode.py  |   4 +-
 .../services/HDFS/package/scripts/zkfc_slave.py |   4 +-
 .../python/stacks/1.3.2/HDFS/test_datanode.py   | 175 +++++++
 .../python/stacks/1.3.2/HDFS/test_namenode.py   | 227 ++++++++
 .../python/stacks/1.3.2/HDFS/test_snamenode.py  | 153 ++++++
 .../python/stacks/2.0.6/HDFS/test_datanode.py   | 168 ++++++
 .../stacks/2.0.6/HDFS/test_journalnode.py       | 155 ++++++
 .../python/stacks/2.0.6/HDFS/test_namenode.py   | 368 +++++++++++++
 .../python/stacks/2.0.6/HDFS/test_snamenode.py  | 156 ++++++
 .../test/python/stacks/2.0.6/HDFS/test_zkfc.py  | 121 +++++
 .../python/stacks/2.0.6/configs/ha_default.json | 437 ++++++++++++++++
 .../python/stacks/2.0.6/configs/ha_secured.json | 521 +++++++++++++++++++
 15 files changed, 2493 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a5a02039/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/datanode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/datanode.py
index eaa27cf..57fdb35 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/datanode.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/datanode.py
@@ -32,7 +32,7 @@ class DataNode(Script):
     import params
 
     env.set_params(params)
-    self.config(env)
+    self.configure(env)
     datanode(action="start")
 
   def stop(self, env):
@@ -41,7 +41,7 @@ class DataNode(Script):
     env.set_params(params)
     datanode(action="stop")
 
-  def config(self, env):
+  def configure(self, env):
     import params
 
     datanode(action="configure")

http://git-wip-us.apache.org/repos/asf/ambari/blob/a5a02039/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/namenode.py
index 80700c8..2f26c98 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/namenode.py
@@ -32,7 +32,7 @@ class NameNode(Script):
     import params
 
     env.set_params(params)
-    self.config(env)
+    self.configure(env)
     namenode(action="start")
 
   def stop(self, env):
@@ -41,7 +41,7 @@ class NameNode(Script):
     env.set_params(params)
     namenode(action="stop")
 
-  def config(self, env):
+  def configure(self, env):
     import params
 
     env.set_params(params)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a5a02039/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
index deb01d5..2179292 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
@@ -27,14 +27,14 @@ class NameNode(Script):
 
     self.install_packages(env)
     env.set_params(params)
-    #TODO remove when config action will be implemented
-    self.config(env)
+    #TODO we need this for HA because of manual steps
+    self.configure(env)
 
   def start(self, env):
     import params
 
     env.set_params(params)
-    self.config(env)
+    self.configure(env)
     namenode(action="start")
 
   def stop(self, env):
@@ -43,7 +43,7 @@ class NameNode(Script):
     env.set_params(params)
     namenode(action="stop")
 
-  def config(self, env):
+  def configure(self, env):
     import params
 
     env.set_params(params)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a5a02039/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/snamenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/snamenode.py
index 8f682ec..b2a3bd1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/snamenode.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/snamenode.py
@@ -35,7 +35,7 @@ class SNameNode(Script):
 
     env.set_params(params)
 
-    self.config(env)
+    self.configure(env)
     snamenode(action="start")
 
   def stop(self, env):
@@ -45,7 +45,7 @@ class SNameNode(Script):
 
     snamenode(action="stop")
 
-  def config(self, env):
+  def configure(self, env):
     import params
 
     env.set_params(params)

http://git-wip-us.apache.org/repos/asf/ambari/blob/a5a02039/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/zkfc_slave.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/zkfc_slave.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/zkfc_slave.py
index 1f9ba65..f415f24 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/zkfc_slave.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/zkfc_slave.py
@@ -32,7 +32,7 @@ class ZkfcSlave(Script):
     import params
 
     env.set_params(params)
-    self.config(env)
+    self.configure(env)
     service(
       action="start", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
       create_log_dir=True
@@ -47,7 +47,7 @@ class ZkfcSlave(Script):
       create_log_dir=True
     )
 
-  def config(self, env):
+  def configure(self, env):
     pass
 
   def status(self, env):
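
The hunks above converge on one pattern across the HDFS scripts: the old config(env) method is renamed to configure(env), and start() applies it before launching the daemon. A minimal sketch of that shape, using a hypothetical component name purely for illustration; the tests below drive the renamed method via command="configure".

  from resource_management import *

  class ExampleSlave(Script):
    def install(self, env):
      self.install_packages(env)

    def configure(self, env):        # formerly config()
      import params
      env.set_params(params)

    def start(self, env):
      import params
      env.set_params(params)
      self.configure(env)            # re-apply configuration before starting the service

    def stop(self, env):
      import params
      env.set_params(params)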

http://git-wip-us.apache.org/repos/asf/ambari/blob/a5a02039/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_datanode.py b/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_datanode.py
new file mode 100644
index 0000000..42b9fe0
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_datanode.py
@@ -0,0 +1,175 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, patch
+from stacks.utils.RMFTestCase import *
+
+
+class TestDatanode(RMFTestCase):
+
+  def test_configure_default(self):
+    self.executeScript("1.3.2/services/HDFS/package/scripts/datanode.py",
+                       classname = "DataNode",
+                       command = "configure",
+                       config_file="default.json"
+    )
+    self.assert_configure_default()
+    self.assertNoMoreResources()
+
+  def test_start_default(self):
+    self.executeScript("1.3.2/services/HDFS/package/scripts/datanode.py",
+                       classname = "DataNode",
+                       command = "start",
+                       config_file="default.json"
+    )
+    self.assert_configure_default()
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Execute', 'true',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode',
+                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
+                              user = 'hdfs',
+                              )
+    self.assertNoMoreResources()
+
+  def test_stop_default(self):
+    self.executeScript("1.3.2/services/HDFS/package/scripts/datanode.py",
+                       classname = "DataNode",
+                       command = "stop",
+                       config_file="default.json"
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Execute', 'true',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode',
+                              not_if = None,
+                              user = 'hdfs',
+                              )
+    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid',
+                              action = ['delete'],
+                              ignore_failures = True,
+                              )
+    self.assertNoMoreResources()
+
+  def test_configure_secured(self):
+    self.executeScript("1.3.2/services/HDFS/package/scripts/datanode.py",
+                       classname = "DataNode",
+                       command = "configure",
+                       config_file="secured.json"
+    )
+    self.assert_configure_secured()
+    self.assertNoMoreResources()
+
+  def test_start_secured(self):
+    self.executeScript("1.3.2/services/HDFS/package/scripts/datanode.py",
+                       classname = "DataNode",
+                       command = "start",
+                       config_file="secured.json"
+    )
+    self.assert_configure_secured()
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Execute', 'kinit -kt /etc/security/keytabs/dn.service.keytab dn/c6402.ambari.apache.org@EXAMPLE.COM',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode',
+                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
+                              user = 'root',
+                              )
+
+  def test_stop_secured(self):
+    self.executeScript("1.3.2/services/HDFS/package/scripts/datanode.py",
+                       classname = "DataNode",
+                       command = "stop",
+                       config_file="secured.json"
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Execute', 'kinit -kt /etc/security/keytabs/dn.service.keytab dn/c6402.ambari.apache.org@EXAMPLE.COM',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode',
+                              not_if = None,
+                              user = 'root',
+                              )
+    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid',
+                              action = ['delete'],
+                              ignore_failures = True,
+                              )
+    self.assertNoMoreResources()
+
+  def assert_configure_default(self):
+    self.assertResourceCalled('Directory', '/var/lib/hadoop-hdfs',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              mode = 0750,
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/hadoop/hdfs',
+                              mode = 0755,
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/hadoop/hdfs/data',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              mode = 0750,
+                              recursive = False,
+                              )
+
+  def assert_configure_secured(self):
+    self.assertResourceCalled('Directory', '/var/lib/hadoop-hdfs',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              mode = 0750,
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/hadoop/hdfs',
+                              mode = 0755,
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/hadoop/hdfs/data',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              mode = 0750,
+                              recursive = False,
+                              )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/a5a02039/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_namenode.py b/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_namenode.py
new file mode 100644
index 0000000..0bed3d6
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_namenode.py
@@ -0,0 +1,227 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, patch
+from stacks.utils.RMFTestCase import *
+
+
+class TestNamenode(RMFTestCase):
+
+  def test_configure_default(self):
+    self.executeScript("1.3.2/services/HDFS/package/scripts/namenode.py",
+                       classname = "NameNode",
+                       command = "configure",
+                       config_file="default.json"
+    )
+    self.assert_configure_default()
+    self.assertNoMoreResources()
+
+  def test_start_default(self):
+    self.executeScript("1.3.2/services/HDFS/package/scripts/namenode.py",
+                       classname = "NameNode",
+                       command = "start",
+                       config_file="default.json"
+    )
+    self.assert_configure_default()
+    self.assertResourceCalled('File', '/tmp/checkForFormat.sh',
+                              content = StaticFile('checkForFormat.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('Execute', 'sh /tmp/checkForFormat.sh hdfs /etc/hadoop/conf /var/run/hadoop/hdfs/namenode/formatted/ /hadoop/hdfs/namenode',
+                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
+                              not_if = 'test -d /var/run/hadoop/hdfs/namenode/formatted/',
+                              )
+    self.assertResourceCalled('Execute', 'mkdir -p /var/run/hadoop/hdfs/namenode/formatted/',
+                              )
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Execute', 'true',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode',
+                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
+                              user = 'hdfs',
+                              )
+    self.assertResourceCalled('Execute', "su - hdfs -c 'hadoop dfsadmin -safemode get' | grep 'Safe mode is OFF'",
+                              tries = 40,
+                              try_sleep = 10,
+                              )
+    self.assertResourceCalled('HdfsDirectory', '/tmp',
+                              security_enabled = False,
+                              keytab = UnknownConfigurationMock(),
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              mode = 0777,
+                              owner = 'hdfs',
+                              action = ['create_delayed'],
+                              )
+    self.assertResourceCalled('HdfsDirectory', '/user/ambari-qa',
+                              security_enabled = False,
+                              keytab = UnknownConfigurationMock(),
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              mode = 0770,
+                              owner = 'ambari-qa',
+                              action = ['create_delayed'],
+                              )
+    self.assertResourceCalled('HdfsDirectory', None,
+                              security_enabled = False,
+                              keytab = UnknownConfigurationMock(),
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              action = ['create'],
+                              )
+    self.assertNoMoreResources()
+
+  def test_stop_default(self):
+    self.executeScript("1.3.2/services/HDFS/package/scripts/namenode.py",
+                       classname = "NameNode",
+                       command = "stop",
+                       config_file="default.json"
+    )
+    self.assertResourceCalled('Execute', 'true',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop namenode',
+                              not_if = None,
+                              user = 'hdfs',
+                              )
+    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid',
+                              action = ['delete'],
+                              ignore_failures = True,
+                              )
+    self.assertNoMoreResources()
+
+  def test_configure_secured(self):
+    self.executeScript("1.3.2/services/HDFS/package/scripts/namenode.py",
+                       classname = "NameNode",
+                       command = "configure",
+                       config_file="secured.json"
+    )
+    self.assert_configure_secured()
+    self.assertNoMoreResources()
+
+  def test_start_secured(self):
+    self.executeScript("1.3.2/services/HDFS/package/scripts/namenode.py",
+                       classname = "NameNode",
+                       command = "start",
+                       config_file="secured.json"
+    )
+    self.assert_configure_secured()
+    self.assertResourceCalled('File', '/tmp/checkForFormat.sh',
+                              content = StaticFile('checkForFormat.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('Execute', 'sh /tmp/checkForFormat.sh hdfs /etc/hadoop/conf /var/run/hadoop/hdfs/namenode/formatted/ /hadoop/hdfs/namenode',
+                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
+                              not_if = 'test -d /var/run/hadoop/hdfs/namenode/formatted/',
+                              )
+    self.assertResourceCalled('Execute', 'mkdir -p /var/run/hadoop/hdfs/namenode/formatted/',
+                              )
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Execute', 'kinit -kt /etc/security/keytabs/nn.service.keytab nn/c6402.ambari.apache.org@EXAMPLE.COM',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode',
+                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
+                              user = 'hdfs',
+                              )
+    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hdfs.headless.keytab hdfs',
+                              user = 'hdfs',
+                              )
+    self.assertResourceCalled('Execute', "su - hdfs -c 'hadoop dfsadmin -safemode get' | grep 'Safe mode is OFF'",
+                              tries = 40,
+                              try_sleep = 10,
+                              )
+    self.assertResourceCalled('HdfsDirectory', '/tmp',
+                              security_enabled = True,
+                              keytab = '/etc/security/keytabs/hdfs.headless.keytab',
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              mode = 0777,
+                              owner = 'hdfs',
+                              action = ['create_delayed'],
+                              )
+    self.assertResourceCalled('HdfsDirectory', '/user/ambari-qa',
+                              security_enabled = True,
+                              keytab = '/etc/security/keytabs/hdfs.headless.keytab',
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              mode = 0770,
+                              owner = 'ambari-qa',
+                              action = ['create_delayed'],
+                              )
+    self.assertResourceCalled('HdfsDirectory', None,
+                              security_enabled = True,
+                              keytab = '/etc/security/keytabs/hdfs.headless.keytab',
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              action = ['create'],
+                              )
+    self.assertNoMoreResources()
+
+  def test_stop_secured(self):
+    self.executeScript("1.3.2/services/HDFS/package/scripts/namenode.py",
+                       classname = "NameNode",
+                       command = "stop",
+                       config_file="secured.json"
+    )
+    self.assertResourceCalled('Execute', 'kinit -kt /etc/security/keytabs/nn.service.keytab nn/c6402.ambari.apache.org@EXAMPLE.COM',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop namenode',
+                              not_if = None,
+                              user = 'hdfs',
+                              )
+    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid',
+                              action = ['delete'],
+                              ignore_failures = True,
+                              )
+    self.assertNoMoreResources()
+
+  def assert_configure_default(self):
+    self.assertResourceCalled('Directory', '/hadoop/hdfs/namenode',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              recursive = True,
+                              mode = 0755,
+                              )
+
+  def assert_configure_secured(self):
+    self.assertResourceCalled('Directory', '/hadoop/hdfs/namenode',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              recursive = True,
+                              mode = 0755,
+                              )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/a5a02039/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_snamenode.py b/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_snamenode.py
new file mode 100644
index 0000000..9436264
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_snamenode.py
@@ -0,0 +1,153 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, patch
+from stacks.utils.RMFTestCase import *
+
+
+class TestSNamenode(RMFTestCase):
+
+  def test_configure_default(self):
+    self.executeScript("1.3.2/services/HDFS/package/scripts/snamenode.py",
+                       classname = "SNameNode",
+                       command = "configure",
+                       config_file="default.json"
+    )
+    self.assert_configure_default()
+    self.assertNoMoreResources()
+
+  def test_start_default(self):
+    self.executeScript("1.3.2/services/HDFS/package/scripts/snamenode.py",
+                       classname = "SNameNode",
+                       command = "start",
+                       config_file="default.json"
+    )
+    self.assert_configure_default()
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Execute', 'true',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode',
+                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid` >/dev/null 2>&1',
+                              user = 'hdfs',
+                              )
+    self.assertNoMoreResources()
+
+  def test_stop_default(self):
+    self.executeScript("1.3.2/services/HDFS/package/scripts/snamenode.py",
+                       classname = "SNameNode",
+                       command = "stop",
+                       config_file="default.json"
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Execute', 'true',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop secondarynamenode',
+                              not_if = None,
+                              user = 'hdfs',
+                              )
+    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid',
+                              action = ['delete'],
+                              ignore_failures = True,
+                              )
+
+  def test_configure_secured(self):
+    self.executeScript("1.3.2/services/HDFS/package/scripts/snamenode.py",
+                       classname = "SNameNode",
+                       command = "configure",
+                       config_file="secured.json"
+    )
+    self.assert_configure_secured()
+    self.assertNoMoreResources()
+
+  def test_start_secured(self):
+    self.executeScript("1.3.2/services/HDFS/package/scripts/snamenode.py",
+                       classname = "SNameNode",
+                       command = "start",
+                       config_file="secured.json"
+    )
+    self.assert_configure_secured()
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Execute', 'kinit -kt /etc/security/keytabs/nn.service.keytab nn/c6402.ambari.apache.org@EXAMPLE.COM',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode',
+                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid` >/dev/null 2>&1',
+                              user = 'hdfs',
+                              )
+
+  def test_stop_secured(self):
+    self.executeScript("1.3.2/services/HDFS/package/scripts/snamenode.py",
+                       classname = "SNameNode",
+                       command = "stop",
+                       config_file="secured.json"
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Execute', 'kinit -kt /etc/security/keytabs/nn.service.keytab nn/c6402.ambari.apache.org@EXAMPLE.COM',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf stop secondarynamenode',
+                              not_if = None,
+                              user = 'hdfs',
+                              )
+    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid',
+                              action = ['delete'],
+                              ignore_failures = True,
+                              )
+
+  def assert_configure_default(self):
+    self.assertResourceCalled('Directory', '/hadoop/hdfs/namesecondary',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              mode = 0755,
+                              recursive = True,
+                              )
+
+  def assert_configure_secured(self):
+    self.assertResourceCalled('Directory', '/hadoop/hdfs/namesecondary',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              mode = 0755,
+                              recursive = True,
+                              )
\ No newline at end of file
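
All of the new test modules in this patch follow the same RMFTestCase pattern: executeScript() runs a single command of one stack script against a canned JSON configuration, and the test then replays the resources it expects, in order, finishing with assertNoMoreResources(). A minimal sketch of that shape, using the harness API imported above from stacks.utils.RMFTestCase (the script path, class name and asserted resource here are hypothetical, for illustration only):

from stacks.utils.RMFTestCase import *


class TestMyDaemon(RMFTestCase):  # hypothetical component, not part of this patch

  def test_start_default(self):
    # run the "start" command of the script under test against default.json
    self.executeScript("1.3.2/services/HDFS/package/scripts/my_daemon.py",  # hypothetical path
                       classname = "MyDaemon",
                       command = "start",
                       config_file = "default.json"
    )
    # assert the resources the script declared, in declaration order
    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
                              owner = 'hdfs',
                              recursive = True,
                              )
    # and that nothing unexpected was scheduled after the last assertion
    self.assertNoMoreResources()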

http://git-wip-us.apache.org/repos/asf/ambari/blob/a5a02039/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
new file mode 100644
index 0000000..838c53a
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
@@ -0,0 +1,168 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, patch
+from stacks.utils.RMFTestCase import *
+
+
+class TestDatanode(RMFTestCase):
+
+  def test_configure_default(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/datanode.py",
+                       classname = "DataNode",
+                       command = "configure",
+                       config_file="default.json"
+    )
+    self.assert_configure_default()
+    self.assertNoMoreResources()
+
+  def test_start_default(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/datanode.py",
+                       classname = "DataNode",
+                       command = "start",
+                       config_file="default.json"
+    )
+    self.assert_configure_default()
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Execute', 'true',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode',
+                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
+                              user = 'hdfs',
+                              )
+    self.assertNoMoreResources()
+
+  def test_stop_default(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/datanode.py",
+                       classname = "DataNode",
+                       command = "stop",
+                       config_file="default.json"
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Execute', 'true',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode',
+                              not_if = None,
+                              user = 'hdfs',
+                              )
+    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid',
+                              action = ['delete'],
+                              ignore_failures = True,
+                              )
+    self.assertNoMoreResources()
+
+  def test_configure_secured(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/datanode.py",
+                       classname = "DataNode",
+                       command = "configure",
+                       config_file="secured.json"
+    )
+    self.assert_configure_secured()
+    self.assertNoMoreResources()
+
+  def test_start_secured(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/datanode.py",
+                       classname = "DataNode",
+                       command = "start",
+                       config_file="secured.json"
+    )
+    self.assert_configure_secured()
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Execute', 'kinit -kt /etc/security/keytabs/dn.service.keytab dn/c6401.ambari.apache.org@EXAMPLE.COM',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode',
+                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid` >/dev/null 2>&1',
+                              user = 'root',
+                              )
+    self.assertNoMoreResources()
+
+  def test_stop_secured(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/datanode.py",
+                       classname = "DataNode",
+                       command = "stop",
+                       config_file="secured.json"
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Execute', 'kinit -kt /etc/security/keytabs/dn.service.keytab dn/c6401.ambari.apache.org@EXAMPLE.COM',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode',
+                              not_if = None,
+                              user = 'root',
+                              )
+    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid',
+                              action = ['delete'],
+                              ignore_failures = True,
+                              )
+    self.assertNoMoreResources()
+
+  def assert_configure_default(self):
+    self.assertResourceCalled('Directory', '/var/lib/hadoop-hdfs',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              mode = 0750,
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/hadoop/hdfs/data',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              mode = 0755,
+                              recursive = True,
+                              )
+
+  def assert_configure_secured(self):
+    self.assertResourceCalled('Directory', '/var/lib/hadoop-hdfs',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              mode = 0750,
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/hadoop/hdfs/data',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              mode = 0755,
+                              recursive = True,
+                              )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/a5a02039/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
new file mode 100644
index 0000000..f453a6a
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, patch
+from stacks.utils.RMFTestCase import *
+
+
+class TestJournalnode(RMFTestCase):
+
+  def test_configure_default(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/journalnode.py",
+                       classname = "JournalNode",
+                       command = "configure",
+                       config_file="default.json"
+    )
+    self.assert_configure_default()
+    self.assertNoMoreResources()
+
+  def test_start_default(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/journalnode.py",
+                       classname = "JournalNode",
+                       command = "start",
+                       config_file="default.json"
+    )
+    self.assert_configure_default()
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Execute', 'true',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start journalnode',
+                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid` >/dev/null 2>&1',
+                              user = 'hdfs',
+                              )
+    self.assertNoMoreResources()
+
+  def test_stop_default(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/journalnode.py",
+                       classname = "JournalNode",
+                       command = "stop",
+                       config_file="default.json"
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Execute', 'true',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop journalnode',
+                              not_if = None,
+                              user = 'hdfs',
+                              )
+    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid',
+                              action = ['delete'],
+                              ignore_failures = True,
+                              )
+    self.assertNoMoreResources()
+
+  def test_configure_secured(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/journalnode.py",
+                       classname = "JournalNode",
+                       command = "configure",
+                       config_file="secured.json"
+    )
+    self.assert_configure_secured()
+    self.assertNoMoreResources()
+
+  def test_start_secured(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/journalnode.py",
+                       classname = "JournalNode",
+                       command = "start",
+                       config_file="secured.json"
+    )
+    self.assert_configure_secured()
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Execute', 'kinit -kt /etc/security/keytabs/jn.service.keytab jn/c6401.ambari.apache.org@EXAMPLE.COM',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start journalnode',
+                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid` >/dev/null 2>&1',
+                              user = 'hdfs',
+                              )
+    self.assertNoMoreResources()
+
+  def test_stop_secured(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/journalnode.py",
+                       classname = "JournalNode",
+                       command = "stop",
+                       config_file="secured.json"
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Execute', 'kinit -kt /etc/security/keytabs/jn.service.keytab jn/c6401.ambari.apache.org@EXAMPLE.COM',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop journalnode',
+                              not_if = None,
+                              user = 'hdfs',
+                              )
+    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid',
+                              action = ['delete'],
+                              ignore_failures = True,
+                              )
+    self.assertNoMoreResources()
+
+  def assert_configure_default(self):
+    self.assertResourceCalled('Directory', '/grid/0/hdfs/journal',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              recursive = True,
+                              )
+
+
+  def assert_configure_secured(self):
+    self.assertResourceCalled('Directory', '/grid/0/hdfs/journal',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              recursive = True,
+                              )
\ No newline at end of file
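
The start/stop assertions above all verify the same daemon-lifecycle idiom: start is guarded by a not_if check on a live PID file, and stop finishes by deleting that PID file while ignoring failures. The sketch below is an illustrative reconstruction of that sequence in resource_management terms, assuming the Directory/Execute/File resources and keyword arguments exercised by the assertions; it is not a copy of the real journalnode.py:

from resource_management import Environment, Directory, Execute, File

pid_file = '/var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid'
daemon = '/usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf'
pid_exists = ('ls %s >/dev/null 2>&1 && ps `cat %s` >/dev/null 2>&1'
              % (pid_file, pid_file))

with Environment() as env:
  # run and log directories owned by the service user
  for d in ('/var/run/hadoop/hdfs', '/var/log/hadoop/hdfs'):
    Directory(d, owner='hdfs', recursive=True)
  # start: skipped when a live PID file is already present
  Execute('ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && '
          '%s start journalnode' % daemon,
          user='hdfs',
          not_if=pid_exists)
  # stop would invoke the same daemon script with "stop" and then clean up the PID file
  File(pid_file, action=['delete'], ignore_failures=True)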

http://git-wip-us.apache.org/repos/asf/ambari/blob/a5a02039/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
new file mode 100644
index 0000000..a91d3f9
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
@@ -0,0 +1,368 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, patch
+from stacks.utils.RMFTestCase import *
+
+
+class TestNamenode(RMFTestCase):
+
+  def test_configure_default(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/namenode.py",
+                       classname = "NameNode",
+                       command = "configure",
+                       config_file="default.json"
+    )
+    self.assert_configure_default()
+    self.assertNoMoreResources()
+
+  def test_start_default(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/namenode.py",
+                       classname = "NameNode",
+                       command = "start",
+                       config_file="default.json"
+    )
+    self.assert_configure_default()
+    self.assertResourceCalled('File', '/tmp/checkForFormat.sh',
+                              content = StaticFile('checkForFormat.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('Execute', 'sh /tmp/checkForFormat.sh hdfs /etc/hadoop/conf /var/run/hadoop/hdfs/namenode/formatted/ /hadoop/hdfs/namenode',
+                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
+                              not_if = 'test -d /var/run/hadoop/hdfs/namenode/formatted/',
+                              )
+    self.assertResourceCalled('Execute', 'mkdir -p /var/run/hadoop/hdfs/namenode/formatted/',
+                              )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
+                              owner = 'hdfs',
+                              content = Template('exclude_hosts_list.j2'),
+                              group = 'hadoop',
+                              )
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Execute', 'true',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode',
+                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
+                              user = 'hdfs',
+                              )
+    self.assertResourceCalled('Execute', "su - hdfs -c 'hadoop dfsadmin -safemode get' | grep 'Safe mode is OFF'",
+                              tries = 40,
+                              only_if = None,
+                              try_sleep = 10,
+                              )
+    self.assertResourceCalled('HdfsDirectory', '/tmp',
+                              security_enabled = False,
+                              keytab = UnknownConfigurationMock(),
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              mode = 0777,
+                              owner = 'hdfs',
+                              action = ['create_delayed'],
+                              )
+    self.assertResourceCalled('HdfsDirectory', '/user/ambari-qa',
+                              security_enabled = False,
+                              keytab = UnknownConfigurationMock(),
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              mode = 0770,
+                              owner = 'ambari-qa',
+                              action = ['create_delayed'],
+                              )
+    self.assertResourceCalled('HdfsDirectory', None,
+                              security_enabled = False,
+                              keytab = UnknownConfigurationMock(),
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              action = ['create'],
+                              only_if = None,
+                              )
+    self.assertNoMoreResources()
+
+  def test_stop_default(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/namenode.py",
+                       classname = "NameNode",
+                       command = "stop",
+                       config_file="default.json"
+    )
+    self.assertResourceCalled('Execute', 'true',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop namenode',
+                              not_if = None,
+                              user = 'hdfs',
+                              )
+    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid',
+                              action = ['delete'],
+                              ignore_failures = True,
+                              )
+    self.assertNoMoreResources()
+
+  def test_configure_secured(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/namenode.py",
+                       classname = "NameNode",
+                       command = "configure",
+                       config_file="secured.json"
+    )
+    self.assert_configure_secured()
+    self.assertNoMoreResources()
+
+  def test_start_secured(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/namenode.py",
+                       classname = "NameNode",
+                       command = "start",
+                       config_file="secured.json"
+    )
+    self.assert_configure_secured()
+    self.assertResourceCalled('File', '/tmp/checkForFormat.sh',
+                              content = StaticFile('checkForFormat.sh'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('Execute', 'sh /tmp/checkForFormat.sh hdfs /etc/hadoop/conf /var/run/hadoop/hdfs/namenode/formatted/ /hadoop/hdfs/namenode',
+                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
+                              not_if = 'test -d /var/run/hadoop/hdfs/namenode/formatted/',
+                              )
+    self.assertResourceCalled('Execute', 'mkdir -p /var/run/hadoop/hdfs/namenode/formatted/',
+                              )
+    self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
+                              owner = 'hdfs',
+                              content = Template('exclude_hosts_list.j2'),
+                              group = 'hadoop',
+                              )
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Execute', 'kinit -kt /etc/security/keytabs/nn.service.keytab nn/c6401.ambari.apache.org@EXAMPLE.COM',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode',
+                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
+                              user = 'hdfs',
+                              )
+    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hdfs.headless.keytab hdfs',
+                              user = 'hdfs',
+                              )
+    self.assertResourceCalled('Execute', "su - hdfs -c 'hadoop dfsadmin -safemode get' | grep 'Safe mode is OFF'",
+                              tries = 40,
+                              only_if = None,
+                              try_sleep = 10,
+                              )
+    self.assertResourceCalled('HdfsDirectory', '/tmp',
+                              security_enabled = True,
+                              keytab = '/etc/security/keytabs/hdfs.headless.keytab',
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              mode = 0777,
+                              owner = 'hdfs',
+                              action = ['create_delayed'],
+                              )
+    self.assertResourceCalled('HdfsDirectory', '/user/ambari-qa',
+                              security_enabled = True,
+                              keytab = '/etc/security/keytabs/hdfs.headless.keytab',
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              mode = 0770,
+                              owner = 'ambari-qa',
+                              action = ['create_delayed'],
+                              )
+    self.assertResourceCalled('HdfsDirectory', None,
+                              security_enabled = True,
+                              keytab = '/etc/security/keytabs/hdfs.headless.keytab',
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              action = ['create'],
+                              only_if = None,
+                              )
+    self.assertNoMoreResources()
+
+  def test_stop_secured(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/namenode.py",
+                       classname = "NameNode",
+                       command = "stop",
+                       config_file="secured.json"
+    )
+    self.assertResourceCalled('Execute', 'kinit -kt /etc/security/keytabs/nn.service.keytab nn/c6401.ambari.apache.org@EXAMPLE.COM',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop namenode',
+                              not_if = None,
+                              user = 'hdfs',
+                              )
+    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid',
+                              action = ['delete'],
+                              ignore_failures = True,
+                              )
+    self.assertNoMoreResources()
+
+  def test_start_ha_default(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/namenode.py",
+                       classname = "NameNode",
+                       command = "start",
+                       config_file="ha_default.json"
+    )
+    self.assert_configure_default()
+    self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
+                              owner = 'hdfs',
+                              content = Template('exclude_hosts_list.j2'),
+                              group = 'hadoop',
+                              )
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Execute', 'true',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode',
+                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
+                              user = 'hdfs',
+                              )
+    self.assertResourceCalled('Execute', "su - hdfs -c 'hadoop dfsadmin -safemode get' | grep 'Safe mode is OFF'",
+                              tries = 40,
+                              only_if = "su - hdfs -c 'hdfs haadmin -getServiceState nn1 | grep active > /dev/null'",
+                              try_sleep = 10,
+                              )
+    self.assertResourceCalled('HdfsDirectory', '/tmp',
+                              security_enabled = False,
+                              keytab = UnknownConfigurationMock(),
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              mode = 0777,
+                              owner = 'hdfs',
+                              action = ['create_delayed'],
+                              )
+    self.assertResourceCalled('HdfsDirectory', '/user/ambari-qa',
+                              security_enabled = False,
+                              keytab = UnknownConfigurationMock(),
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              mode = 0770,
+                              owner = 'ambari-qa',
+                              action = ['create_delayed'],
+                              )
+    self.assertResourceCalled('HdfsDirectory', None,
+                              security_enabled = False,
+                              keytab = UnknownConfigurationMock(),
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              action = ['create'],
+                              only_if = "su - hdfs -c 'hdfs haadmin -getServiceState nn1 | grep active > /dev/null'",
+                              )
+    self.assertNoMoreResources()
+
+  def test_start_ha_secured(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/namenode.py",
+                       classname = "NameNode",
+                       command = "start",
+                       config_file="ha_secured.json"
+    )
+    self.assert_configure_default()
+    self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
+                              owner = 'hdfs',
+                              content = Template('exclude_hosts_list.j2'),
+                              group = 'hadoop',
+                              )
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Execute', 'kinit -kt /etc/security/keytabs/nn.service.keytab nn/c6401.ambari.apache.org@EXAMPLE.COM',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode',
+                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
+                              user = 'hdfs',
+                              )
+    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hdfs.headless.keytab hdfs',
+                              user = 'hdfs',
+                              )
+    self.assertResourceCalled('Execute', "su - hdfs -c 'hadoop dfsadmin -safemode get' | grep 'Safe mode is OFF'",
+                              tries = 40,
+                              only_if = "su - hdfs -c 'hdfs haadmin -getServiceState nn1 | grep active > /dev/null'",
+                              try_sleep = 10,
+                              )
+    self.assertResourceCalled('HdfsDirectory', '/tmp',
+                              security_enabled = True,
+                              keytab = '/etc/security/keytabs/hdfs.headless.keytab',
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              mode = 0777,
+                              owner = 'hdfs',
+                              action = ['create_delayed'],
+                              )
+    self.assertResourceCalled('HdfsDirectory', '/user/ambari-qa',
+                              security_enabled = True,
+                              keytab = '/etc/security/keytabs/hdfs.headless.keytab',
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              mode = 0770,
+                              owner = 'ambari-qa',
+                              action = ['create_delayed'],
+                              )
+    self.assertResourceCalled('HdfsDirectory', None,
+                              security_enabled = True,
+                              keytab = '/etc/security/keytabs/hdfs.headless.keytab',
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              action = ['create'],
+                              only_if = "su - hdfs -c 'hdfs haadmin -getServiceState nn1 | grep active > /dev/null'",
+                              )
+    self.assertNoMoreResources()
+
+  def assert_configure_default(self):
+    self.assertResourceCalled('Directory', '/hadoop/hdfs/namenode',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              recursive = True,
+                              mode = 0755,
+                              )
+
+  def assert_configure_secured(self):
+    self.assertResourceCalled('Directory', '/hadoop/hdfs/namenode',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              recursive = True,
+                              mode = 0755,
+                              )
\ No newline at end of file
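
The HA variants of the NameNode start test differ from the plain ones only in the guard: the safemode wait and the final HdfsDirectory create carry an only_if that first checks whether this NameNode is the active one, instead of only_if = None. A sketch of how such a guard could be assembled (illustrative only; the variable names are hypothetical, and the real scripts derive these values from the command JSON):

# Hypothetical parameter wiring for the only_if string asserted above.
dfs_ha_enabled = True   # whether an HA nameservice is configured for this cluster
namenode_id = 'nn1'     # this host's logical NameNode id within the nameservice

# When HA is on, only the active NameNode waits for safemode and creates directories;
# otherwise the guard stays None and the Execute runs unconditionally.
dfs_check_nn_status_cmd = (
    "su - hdfs -c 'hdfs haadmin -getServiceState %s | grep active > /dev/null'" % namenode_id
    if dfs_ha_enabled else None
)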

http://git-wip-us.apache.org/repos/asf/ambari/blob/a5a02039/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_snamenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_snamenode.py
new file mode 100644
index 0000000..7693d3f
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_snamenode.py
@@ -0,0 +1,156 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, patch
+from stacks.utils.RMFTestCase import *
+
+
+class TestSNamenode(RMFTestCase):
+
+  def test_configure_default(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/snamenode.py",
+                       classname = "SNameNode",
+                       command = "configure",
+                       config_file="default.json"
+    )
+    self.assert_configure_default()
+    self.assertNoMoreResources()
+
+  def test_start_default(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/snamenode.py",
+                       classname = "SNameNode",
+                       command = "start",
+                       config_file="default.json"
+    )
+    self.assert_configure_default()
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Execute', 'true',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode',
+                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid` >/dev/null 2>&1',
+                              user = 'hdfs',
+                              )
+    self.assertNoMoreResources()
+
+  def test_stop_default(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/snamenode.py",
+                       classname = "SNameNode",
+                       command = "stop",
+                       config_file="default.json"
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Execute', 'true',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop secondarynamenode',
+                              not_if = None,
+                              user = 'hdfs',
+                              )
+    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid',
+                              action = ['delete'],
+                              ignore_failures = True,
+                              )
+    self.assertNoMoreResources()
+
+  def test_configure_secured(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/snamenode.py",
+                       classname = "SNameNode",
+                       command = "configure",
+                       config_file="secured.json"
+    )
+    self.assert_configure_secured()
+    self.assertNoMoreResources()
+
+  def test_start_secured(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/snamenode.py",
+                       classname = "SNameNode",
+                       command = "start",
+                       config_file="secured.json"
+    )
+    self.assert_configure_secured()
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Execute', 'kinit -kt /etc/security/keytabs/nn.service.keytab nn/c6401.ambari.apache.org@EXAMPLE.COM',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start secondarynamenode',
+                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid` >/dev/null 2>&1',
+                              user = 'hdfs',
+                              )
+    self.assertNoMoreResources()
+
+  def test_stop_secured(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/snamenode.py",
+                       classname = "SNameNode",
+                       command = "stop",
+                       config_file="secured.json"
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Execute', 'kinit -kt /etc/security/keytabs/nn.service.keytab nn/c6401.ambari.apache.org@EXAMPLE.COM',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop secondarynamenode',
+                              not_if = None,
+                              user = 'hdfs',
+                              )
+    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid',
+                              action = ['delete'],
+                              ignore_failures = True,
+                              )
+    self.assertNoMoreResources()
+
+  def assert_configure_default(self):
+    self.assertResourceCalled('Directory', '/hadoop/hdfs/namesecondary',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              mode = 0755,
+                              recursive = True,
+                              )
+
+  def assert_configure_secured(self):
+    self.assertResourceCalled('Directory', '/hadoop/hdfs/namesecondary',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              mode = 0755,
+                              recursive = True,
+                              )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/a5a02039/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_zkfc.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_zkfc.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_zkfc.py
new file mode 100644
index 0000000..b258c5d
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_zkfc.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, patch
+from stacks.utils.RMFTestCase import *
+
+
+class TestZkfc(RMFTestCase):
+
+  def test_start_default(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/zkfc_slave.py",
+                       classname = "ZkfcSlave",
+                       command = "start",
+                       config_file="ha_default.json"
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Execute', 'true',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start zkfc',
+                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid` >/dev/null 2>&1',
+                              user = 'hdfs',
+                              )
+    self.assertNoMoreResources()
+
+
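+  # Stop has no guard (not_if=None) and finishes by deleting the stale pid file.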
+  def test_stop_default(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/zkfc_slave.py",
+                       classname = "ZkfcSlave",
+                       command = "stop",
+                       config_file="ha_default.json"
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Execute', 'true',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop zkfc',
+                              not_if = None,
+                              user = 'hdfs',
+                              )
+    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid',
+                              action = ['delete'],
+                              ignore_failures = True,
+                              )
+    self.assertNoMoreResources()
+
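+  # The secured start follows the same flow, driven by ha_secured.json.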
+  def test_start_secured(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/zkfc_slave.py",
+                       classname = "ZkfcSlave",
+                       command = "start",
+                       config_file="ha_secured.json"
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Execute', 'true',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start zkfc',
+                              not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid` >/dev/null 2>&1',
+                              user = 'hdfs',
+                              )
+    self.assertNoMoreResources()
+
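+  # Secured stop mirrors the default stop, including the pid-file cleanup.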
+  def test_stop_secured(self):
+    self.executeScript("2.0.6/services/HDFS/package/scripts/zkfc_slave.py",
+                       classname = "ZkfcSlave",
+                       command = "stop",
+                       config_file="ha_secured.json"
+    )
+    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
+                              owner = 'hdfs',
+                              recursive = True,
+                              )
+    self.assertResourceCalled('Execute', 'true',
+                              )
+    self.assertResourceCalled('Execute', 'ulimit -c unlimited;  export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop zkfc',
+                              not_if = None,
+                              user = 'hdfs',
+                              )
+    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid',
+                              action = ['delete'],
+                              ignore_failures = True,
+                              )
+    self.assertNoMoreResources()