Posted to commits@ambari.apache.org by ao...@apache.org on 2015/05/22 19:38:10 UTC

[2/2] ambari git commit: AMBARI-11347. Optimize creating hdfs resources by using Webhdfs instead of hadoop fs jar calls (aonishuk)

AMBARI-11347. Optimize creating hdfs resources by using Webhdfs instead of hadoop fs jar calls (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/aa51bd75
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/aa51bd75
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/aa51bd75

Branch: refs/heads/trunk
Commit: aa51bd75b88a5d8e8d7bdec7240c6b561a94805d
Parents: e79cf4e
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Fri May 22 20:36:55 2015 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Fri May 22 20:36:55 2015 +0300

----------------------------------------------------------------------
 .../libraries/functions/jmx.py                  |  31 ++
 .../libraries/functions/namenode_ha_utils.py    | 117 ++++++
 .../libraries/providers/hdfs_resource.py        | 352 +++++++++++++++++--
 .../libraries/resources/hdfs_resource.py        |   5 +
 .../server/upgrade/UpgradeCatalog210.java       |  34 ++
 .../1.6.1.2.2.0/package/scripts/params.py       |  12 +-
 .../0.1.0/package/scripts/params.py             |  11 +-
 .../0.5.0.2.1/package/scripts/params_linux.py   |  11 +-
 .../0.96.0.2.0/package/scripts/params_linux.py  |  11 +-
 .../HDFS/2.1.0.2.0/configuration/hadoop-env.xml |   4 +-
 .../2.1.0.2.0/package/scripts/hdfs_namenode.py  |   6 +-
 .../package/scripts/namenode_ha_state.py        |   2 +-
 .../2.1.0.2.0/package/scripts/params_linux.py   |  10 +-
 .../HDFS/2.1.0.2.0/package/scripts/utils.py     |  11 -
 .../0.12.0.2.0/package/scripts/params_linux.py  |  11 +-
 .../MAHOUT/1.0.0.2.3/package/scripts/params.py  |  11 +-
 .../4.0.0.2.0/package/scripts/params_linux.py   |  13 +-
 .../0.12.0.2.0/package/scripts/params_linux.py  |  10 +-
 .../0.12.0.2.0/package/scripts/service_check.py |   8 +-
 .../SPARK/1.2.0.2.2/package/scripts/params.py   |   9 +-
 .../0.4.0.2.1/package/scripts/params_linux.py   |  12 +-
 .../2.1.0.2.0/package/scripts/params_linux.py   |  12 +-
 .../services/HDFS/configuration/hadoop-env.xml  |   4 +-
 .../services/HDFS/configuration/hadoop-env.xml  |   4 +-
 .../stacks/2.0.6/HBASE/test_hbase_master.py     |  21 +-
 .../python/stacks/2.0.6/HDFS/test_namenode.py   |  99 +++++-
 .../stacks/2.0.6/HDFS/test_service_check.py     |  14 +-
 .../stacks/2.0.6/HIVE/test_hive_server.py       |  28 +-
 .../2.0.6/HIVE/test_hive_service_check.py       |  30 +-
 .../stacks/2.0.6/OOZIE/test_oozie_server.py     |  42 ++-
 .../stacks/2.0.6/OOZIE/test_service_check.py    |  10 +-
 .../stacks/2.0.6/PIG/test_pig_service_check.py  |  42 ++-
 .../stacks/2.0.6/YARN/test_historyserver.py     |  22 +-
 .../2.0.6/YARN/test_mapreduce2_service_check.py |  12 +-
 .../stacks/2.0.6/YARN/test_resourcemanager.py   |   6 +-
 .../stacks/2.1/FALCON/test_falcon_server.py     |   6 +-
 .../python/stacks/2.1/TEZ/test_service_check.py |   6 +-
 .../stacks/2.2/PIG/test_pig_service_check.py    |  42 ++-
 .../stacks/2.2/SPARK/test_job_history_server.py |  14 +-
 .../2.3/MAHOUT/test_mahout_service_check.py     |   8 +-
 .../src/test/python/stacks/utils/RMFTestCase.py |   2 +-
 41 files changed, 931 insertions(+), 184 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/aa51bd75/ambari-common/src/main/python/resource_management/libraries/functions/jmx.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/jmx.py b/ambari-common/src/main/python/resource_management/libraries/functions/jmx.py
new file mode 100644
index 0000000..b32f6aa
--- /dev/null
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/jmx.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+import urllib2
+import json
+
+def get_value_from_jmx(qry, property):
+  try:
+    response = urllib2.urlopen(qry)
+    data = response.read()
+    if data:
+      data_dict = json.loads(data)
+      return data_dict["beans"][0][property]
+  except Exception:
+    return None
\ No newline at end of file

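For reference, a minimal usage sketch of the helper above (the NameNode host, port and property name below are illustrative assumptions; in the stack scripts the address comes from dfs.namenode.http-address):

    from resource_management.libraries.functions.jmx import get_value_from_jmx

    # JMX query against a hypothetical NameNode HTTP endpoint; this is the same URI shape
    # as JMX_URI_FRAGMENT in namenode_ha_utils.py below.
    jmx_uri = "http://nn1.example.com:50070/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus"

    # Returns the 'State' attribute of the first matching bean, or None on any failure.
    state = get_value_from_jmx(jmx_uri, "State")  # e.g. 'active' or 'standby'
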
http://git-wip-us.apache.org/repos/asf/ambari/blob/aa51bd75/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py b/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
new file mode 100644
index 0000000..ab53ba7
--- /dev/null
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from resource_management.libraries.script import UnknownConfiguration
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.jmx import get_value_from_jmx
+from resource_management.core.base import Fail
+__all__ = ["get_namenode_states", "get_active_namenode", "get_property_for_active_namenode"]
+
+HDFS_NN_STATE_ACTIVE = 'active'
+HDFS_NN_STATE_STANDBY = 'standby'
+
+NAMENODE_HTTP_FRAGMENT = 'dfs.namenode.http-address.{0}.{1}'
+NAMENODE_HTTPS_FRAGMENT = 'dfs.namenode.https-address.{0}.{1}'
+JMX_URI_FRAGMENT = "http://{0}/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus"
+  
+def get_namenode_states(hdfs_site):
+  """
+  Returns three lists of (nn_unique_id, address) tuples: active, standby and unknown namenodes,
+  e.g. [('nn1', 'hostname1:port1'), ('nn2', 'hostname2:port2')], [...], [...]
+  """
+  active_namenodes = []
+  standby_namenodes = []
+  unknown_namenodes = []
+  
+  name_service = hdfs_site['dfs.nameservices']
+  nn_unique_ids_key = 'dfs.ha.namenodes.' + name_service
+
+  # now we have something like 'nn1,nn2,nn3,nn4'
+  # turn it into dfs.namenode.[property].[dfs.nameservices].[nn_unique_id]
+  # i.e. dfs.namenode.http-address.hacluster.nn1
+  nn_unique_ids = hdfs_site[nn_unique_ids_key].split(',')
+  for nn_unique_id in nn_unique_ids:
+    is_https_enabled = hdfs_site['dfs.https.enabled'] if not is_empty(hdfs_site['dfs.https.enabled']) else False
+    
+    if not is_https_enabled:
+      key = NAMENODE_HTTP_FRAGMENT.format(name_service,nn_unique_id)
+    else:
+      key = NAMENODE_HTTPS_FRAGMENT.format(name_service,nn_unique_id)
+
+    if key in hdfs_site:
+      # use str() to ensure that unicode strings do not have the u' in them
+      value = str(hdfs_site[key])
+
+      jmx_uri = JMX_URI_FRAGMENT.format(value)
+      state = get_value_from_jmx(jmx_uri,'State')
+      
+      if state == HDFS_NN_STATE_ACTIVE:
+        active_namenodes.append((nn_unique_id, value))
+      elif state == HDFS_NN_STATE_STANDBY:
+        standby_namenodes.append((nn_unique_id, value))
+      else:
+        unknown_namenodes.append((nn_unique_id, value))
+        
+  return active_namenodes, standby_namenodes, unknown_namenodes
+
+def is_ha_enabled(hdfs_site):
+  dfs_ha_nameservices = hdfs_site['dfs.nameservices']
+  
+  if is_empty(dfs_ha_nameservices):
+    return False
+  
+  dfs_ha_namenode_ids = hdfs_site[format("dfs.ha.namenodes.{dfs_ha_nameservices}")]
+  
+  if not is_empty(dfs_ha_namenode_ids):
+    dfs_ha_namenode_ids_list = dfs_ha_namenode_ids.split(",")
+    dfs_ha_namenode_ids_array_len = len(dfs_ha_namenode_ids_list)
+    if dfs_ha_namenode_ids_array_len > 1:
+      return True
+      
+  return False
+
+def get_active_namenode(hdfs_site):
+  """
+  Returns the active namenode as a (nn_unique_id, address) tuple, e.g. ('nn1', 'hostname1:port1')
+  """
+  active_namenodes = get_namenode_states(hdfs_site)[0]
+  if active_namenodes:
+    return active_namenodes[0]
+  else:
+    return UnknownConfiguration('fs_root')
+  
+def get_property_for_active_namenode(hdfs_site, property_name):
+  """
+  For dfs.namenode.rpc-address:
+    - In non-HA mode it will return hdfs_site['dfs.namenode.rpc-address']
+    - In HA mode it will return hdfs_site['dfs.namenode.rpc-address.nnha.nn2'], where nnha is the nameservice name and nn2 is the id of the active NameNode
+  """
+  if is_ha_enabled(hdfs_site):
+    name_service = hdfs_site['dfs.nameservices']
+    active_namenodes = get_namenode_states(hdfs_site)[0]
+    
+    if not len(active_namenodes):
+      raise Fail("There are no active namenodes.")
+    
+    active_namenode_id = active_namenodes[0][0]
+    
+    return hdfs_site[format("{property_name}.{name_service}.{active_namenode_id}")]
+  else:
+    return hdfs_site[property_name]
+  
\ No newline at end of file

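A minimal sketch of how these helpers fit together, assuming a hypothetical HA hdfs-site with nameservice 'hacluster' and namenodes 'nn1'/'nn2' (all hostnames and ports are illustrative, and resolving the active NameNode requires the JMX endpoints to be reachable):

    from resource_management.libraries.functions import namenode_ha_utils

    hdfs_site = {
      'dfs.nameservices': 'hacluster',
      'dfs.ha.namenodes.hacluster': 'nn1,nn2',
      'dfs.https.enabled': False,
      'dfs.namenode.http-address.hacluster.nn1': 'c6401.ambari.apache.org:50070',
      'dfs.namenode.http-address.hacluster.nn2': 'c6402.ambari.apache.org:50070',
      'dfs.namenode.rpc-address.hacluster.nn1': 'c6401.ambari.apache.org:8020',
      'dfs.namenode.rpc-address.hacluster.nn2': 'c6402.ambari.apache.org:8020',
    }

    # Each list holds (nn_unique_id, http_address) tuples, classified by the 'State'
    # every NameNode reports over JMX.
    active, standby, unknown = namenode_ha_utils.get_namenode_states(hdfs_site)

    # Resolves to the rpc-address of whichever NameNode is currently active.
    rpc_address = namenode_ha_utils.get_property_for_active_namenode(hdfs_site, 'dfs.namenode.rpc-address')
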
http://git-wip-us.apache.org/repos/asf/ambari/blob/aa51bd75/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py b/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
index 21ad8ed..260e81e 100644
--- a/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
+++ b/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_resource.py
@@ -20,13 +20,18 @@ Ambari Agent
 
 """
 import json
+import re
+import os
 from resource_management.core.environment import Environment
 from resource_management.core.base import Fail
 from resource_management.core.resources.system import Execute
 from resource_management.core.resources.system import File
 from resource_management.core.providers import Provider
 from resource_management.core.logger import Logger
+from resource_management.core import shell
 from resource_management.libraries.functions import format
+from resource_management.libraries.functions import is_empty
+from resource_management.libraries.functions import namenode_ha_utils
 
 
 JSON_PATH = '/var/lib/ambari-agent/data/hdfs_resources.json'
@@ -45,61 +50,57 @@ RESOURCE_TO_JSON_FIELDS = {
   'change_permissions_for_parents': 'changePermissionforParents'
 }
 
-
-class HdfsResourceProvider(Provider):
-  def action_delayed(self, action_name):
+class HdfsResourceJar:
+  """
+  This is slower than the HdfsResourceWebHDFS implementation of HdfsResource, but it works in all cases and on any DFS type.
+  
+  The idea is to put all the files/directories/copyFromLocals we have to create/delete into a json file,
+  and then process it with ONLY ONE expensive hadoop call to our custom jar fast-hdfs-resource.jar, which reads this json.
+  
+  'create_on_execute' and 'delete_on_execute' do nothing but add files/directories to this json,
+  while 'execute' does all the expensive creating/deleting work, executing the jar with the json as a parameter.
+  """
+  def action_delayed(self, action_name, main_resource):
     resource = {}
     env = Environment.get_instance()
     if not 'hdfs_files' in env.config:
       env.config['hdfs_files'] = []
 
-    # Check required parameters
-    if not self.resource.type or not self.resource.action:
-      raise Fail("Resource parameter type or action is not set.")
-
     # Put values in dictionary-resource
     for field_name, json_field_name in RESOURCE_TO_JSON_FIELDS.iteritems():
       if field_name == 'action':
         resource[json_field_name] = action_name
-      elif field_name == 'mode' and self.resource.mode:
-        resource[json_field_name] = oct(self.resource.mode)[1:]
-      elif getattr(self.resource, field_name):
-        resource[json_field_name] = getattr(self.resource, field_name)
+      elif field_name == 'mode' and main_resource.resource.mode:
+        resource[json_field_name] = oct(main_resource.resource.mode)[1:]
+      elif getattr(main_resource.resource, field_name):
+        resource[json_field_name] = getattr(main_resource.resource, field_name)
 
     # Add resource to create
     env.config['hdfs_files'].append(resource)
-
-  def action_create_on_execute(self):
-    self.action_delayed("create")
-
-  def action_delete_on_execute(self):
-    self.action_delayed("delete")
-
-  def action_execute(self):
+    
+  def action_execute(self, main_resource):
     env = Environment.get_instance()
 
     # Check required parameters
-    if not self.resource.user:
-      raise Fail("Resource parameter 'user' is not set.")
+    main_resource.assert_parameter_is_set('user')
 
     if not 'hdfs_files' in env.config or not env.config['hdfs_files']:
       Logger.info("No resources to create. 'create_on_execute' or 'delete_on_execute' wasn't triggered before this 'execute' action.")
       return
-
-    hadoop_bin_dir = self.resource.hadoop_bin_dir
-    hadoop_conf_dir = self.resource.hadoop_conf_dir
-    user = self.resource.user
-    security_enabled = self.resource.security_enabled
-    keytab_file = self.resource.keytab
-    kinit_path = self.resource.kinit_path_local
-    logoutput = self.resource.logoutput
+    
+    hadoop_bin_dir = main_resource.resource.hadoop_bin_dir
+    hadoop_conf_dir = main_resource.resource.hadoop_conf_dir
+    user = main_resource.resource.user
+    security_enabled = main_resource.resource.security_enabled
+    keytab_file = main_resource.resource.keytab
+    kinit_path = main_resource.resource.kinit_path_local
+    logoutput = main_resource.resource.logoutput
+    principal_name = main_resource.resource.principal_name
     jar_path=JAR_PATH
     json_path=JSON_PATH
 
     if security_enabled:
-      Execute(format("{kinit_path} -kt {keytab_file} {hdfs_principal_name}"),
-              user=user
-      )
+      main_resource.kinit()
 
     # Write json file to disk
     File(JSON_PATH,
@@ -116,3 +117,290 @@ class HdfsResourceProvider(Provider):
 
     # Clean
     env.config['hdfs_files'] = []
+
+class WebHDFSUtil:
+  def __init__(self, address, run_user, logoutput, security_enabled, kinit_function):
+    self.address = address
+    self.run_user = run_user
+    self.logoutput = logoutput
+    self.security_enabled = security_enabled
+    self.kinit_function = kinit_function
+    
+  def parse_path(self, path):
+    """
+    hdfs://nn_url:1234/a/b/c -> /a/b/c
+    hdfs://nn_ha_name/a/b/c -> /a/b/c
+    hdfs:///a/b/c -> /a/b/c
+    /a/b/c -> /a/b/c
+    """
+    match_with_protocol_and_nn_url = re.match("[a-zA-Z]+://[^/]+(/.+)", path)
+    match_with_protocol = re.match("[a-zA-Z]+://(/.+)", path)
+    
+    if match_with_protocol_and_nn_url:
+      path = match_with_protocol_and_nn_url.group(1)
+    elif match_with_protocol:
+      path = match_with_protocol.group(1)
+    else:
+      path = path
+      
+    return re.sub("[/]+", "/", path)
+    
+  valid_status_codes = ["200", "201", "500"]
+  def run_command(self, target, operation, method='POST', assertable_result=True, file_to_put=None, ignore_status_codes=[], **kwargs):
+    """
+    assertable_result - some POST requests return '{"boolean":false}' or '{"boolean":true}',
+    depending on whether the query was successful, so we can assert on the result for them.
+    """
+    target = self.parse_path(target)
+    
+    url = format("{address}/webhdfs/v1{target}?op={operation}&user.name={run_user}", address=self.address, run_user=self.run_user)
+    for k,v in kwargs.iteritems():
+      url = format("{url}&{k}={v}")
+    
+    if file_to_put and not os.path.exists(file_to_put):
+      raise Fail(format("File {file_to_put} is not found."))
+    
+    cmd = ["curl", "-L", "-w", "%{http_code}", "-X", method]
+    
+    if file_to_put:
+      cmd += ["-T", file_to_put]
+    if self.security_enabled:
+      self.kinit_function()
+      cmd += ["--negotiate", "-u", ":"]
+      
+    cmd.append(url)
+    _, out = shell.checked_call(cmd, user=self.run_user, logoutput=self.logoutput, quiet=False)
+    status_code = out[-3:]
+    out = out[:-3] # remove last line from output which is status code
+    
+    try:
+      result_dict = json.loads(out)
+    except ValueError:
+      result_dict = out
+          
+    if status_code not in WebHDFSUtil.valid_status_codes+ignore_status_codes or assertable_result and not result_dict['boolean']:
+      formatted_output = json.dumps(result_dict, indent=2) if isinstance(result_dict, dict) else result_dict
+      err_msg = "Execution of '%s' returned status_code=%s. %s" % (shell.string_cmd_from_args_list(cmd), status_code, formatted_output)
+      raise Fail(err_msg)
+    
+    return result_dict
+    
+class HdfsResourceWebHDFS:
+  """
+  This is the fastest implementation of HdfsResource, using WebHDFS.
+  Since WebHDFS is not available on non-HDFS filesystems and can also be disabled within HDFS itself,
+  we still keep the other implementations for such cases.
+  """
+  def action_execute(self, main_resource):
+    pass # all work for WebHDFS is done immediately in action_delayed, so 'execute' is a no-op
+  
+  def _assert_valid(self):
+    source = self.main_resource.resource.source
+    type = self.main_resource.resource.type
+    target = self.main_resource.resource.target
+    
+    if source:
+      if not os.path.exists(source):
+        raise Fail(format("Source {source} doesn't exist"))
+      if type == "directory" and os.path.isfile(source):
+        raise Fail(format("Source {source} is file but type is {type}"))
+      elif type == "file" and os.path.isdir(source): 
+        raise Fail(format("Source {source} is directory but type is {type}"))
+    
+    self.target_status = self._get_file_status(target)
+    
+    if self.target_status and self.target_status['type'].lower() != type:
+      raise Fail(format("Trying to create a file/directory, but an entry of the opposite type already exists in DFS at {target}"))
+    
+  def action_delayed(self, action_name, main_resource):
+    main_resource.assert_parameter_is_set('user')
+    
+    address = main_resource.https_nn_address if main_resource.is_https_enabled else main_resource.http_nn_address
+    protocol = "https" if main_resource.is_https_enabled else "http"
+    
+    self.util = WebHDFSUtil(format("{protocol}://{address}"), main_resource.resource.user, 
+                            main_resource.resource.logoutput, main_resource.resource.security_enabled,
+                            main_resource.kinit)
+    self.mode = oct(main_resource.resource.mode)[1:] if main_resource.resource.mode else main_resource.resource.mode
+    self.mode_set = False
+    self.main_resource = main_resource
+    self._assert_valid()
+        
+    if action_name == "create":
+      self._create_resource()
+      self._set_mode(self.target_status)
+      self._set_owner(self.target_status)
+    else:
+      self._delete_resource()
+    
+  def _create_resource(self):
+    is_create = (self.main_resource.resource.source == None)
+    
+    if is_create and self.main_resource.resource.type == "directory":
+      self._create_directory(self.main_resource.resource.target)
+    elif is_create and self.main_resource.resource.type == "file":
+      self._create_file(self.main_resource.resource.target, mode=self.mode)
+    elif not is_create and self.main_resource.resource.type == "file":
+      self._create_file(self.main_resource.resource.target, source=self.main_resource.resource.source, mode=self.mode)
+    elif not is_create and self.main_resource.resource.type == "directory":
+      self._create_directory(self.main_resource.resource.target)
+      self._copy_from_local_directory(self.main_resource.resource.target, self.main_resource.resource.source)
+    
+  def _copy_from_local_directory(self, target, source):
+    for next_path_part in os.listdir(source):
+      new_source = os.path.join(source, next_path_part)
+      new_target = format("{target}/{next_path_part}")
+      if os.path.isdir(new_source):
+        Logger.info(format("Creating DFS directory {new_target}"))
+        self._create_directory(new_target)
+        self._copy_from_local_directory(new_target, new_source)
+      else:
+        self._create_file(new_target, new_source)
+  
+  def _create_directory(self, target):
+    if target == self.main_resource.resource.target and self.target_status:
+      return
+    
+    self.util.run_command(target, 'MKDIRS', method='PUT')
+    
+  def _get_file_status(self, target):
+    list_status = self.util.run_command(target, 'GETFILESTATUS', method='GET', ignore_status_codes=['404'], assertable_result=False)
+    return list_status['FileStatus'] if 'FileStatus' in list_status else None
+    
+  def _create_file(self, target, source=None, mode=""):
+    """
+    The PUT file command is slow, however _get_file_status is pretty fast,
+    so we should check whether the file really needs to be put before doing it.
+    """
+    file_status = self._get_file_status(target) if target!=self.main_resource.resource.target else self.target_status
+    mode = "" if not mode else mode
+    
+    if file_status:
+      if source:
+        length = file_status['length']
+        local_file_size = os.stat(source).st_size # TODO: os -> sudo
+        
+        # TODO: re-implement this using checksums
+        if local_file_size == length:
+          Logger.info(format("DFS file {target} is identical to {source}, skipping the copying"))
+          return
+      else:
+        Logger.info(format("File {target} already exists in DFS, skipping the creation"))
+        return
+    
+    Logger.info(format("Creating new file {target} in DFS"))
+    kwargs = {'permission': mode} if mode else {}
+      
+    self.util.run_command(target, 'CREATE', method='PUT', overwrite=True, assertable_result=False, file_to_put=source, **kwargs)
+    
+    if mode and file_status:
+      file_status['permission'] = mode
+    
+     
+  def _delete_resource(self):
+    if not self.target_status:
+      return
+    self.util.run_command(self.main_resource.resource.target, 'DELETE', method='DELETE', recursive=True)
+
+  def _set_owner(self, file_status=None):
+    owner = "" if not self.main_resource.resource.owner else self.main_resource.resource.owner
+    group = "" if not self.main_resource.resource.group else self.main_resource.resource.group
+    
+    if (not owner or file_status and file_status['owner'] == owner) and (not group or file_status and file_status['group'] == group):
+      return
+    
+    self.util.run_command(self.main_resource.resource.target, 'SETOWNER', method='PUT', owner=owner, group=group, assertable_result=False)
+    
+    results = []
+    
+    if self.main_resource.resource.recursive_chown:
+      self._fill_directories_list(self.main_resource.resource.target, results)
+    if self.main_resource.resource.change_permissions_for_parents:
+      self._fill_in_parent_directories(self.main_resource.resource.target, results)
+      
+    for path in results:
+      self.util.run_command(path, 'SETOWNER', method='PUT', owner=owner, group=group, assertable_result=False)
+  
+  def _set_mode(self, file_status=None):
+    if not self.mode or file_status and file_status['permission'] == self.mode:
+      return
+    
+    if not self.mode_set:
+      self.util.run_command(self.main_resource.resource.target, 'SETPERMISSION', method='PUT', permission=self.mode, assertable_result=False)
+    
+    results = []
+    
+    if self.main_resource.resource.recursive_chmod:
+      self._fill_directories_list(self.main_resource.resource.target, results)
+    if self.main_resource.resource.change_permissions_for_parents:
+      self._fill_in_parent_directories(self.main_resource.resource.target, results)
+      
+    for path in results:
+      self.util.run_command(path, 'SETPERMISSION', method='PUT', permission=self.mode, assertable_result=False)
+    
+    
+  def _fill_in_parent_directories(self, target, results):
+    path_parts = self.util.parse_path(target).split("/") 
+    path = "/"
+    
+    for path_part in path_parts:
+      path += path_part + "/"
+      results.append(path)
+      
+  def _fill_directories_list(self, target, results):
+    list_status = self.util.run_command(target, 'LISTSTATUS', method='GET', assertable_result=False)['FileStatuses']['FileStatus']
+    
+    for file in list_status:
+      if file['pathSuffix']:
+        new_path = target + "/" + file['pathSuffix']
+        results.append(new_path)
+        
+        if file['type'] == 'DIRECTORY':
+          self._fill_directories_list(new_path, results)
+    
+class HdfsResourceProvider(Provider):
+  def __init__(self, resource):
+    super(HdfsResourceProvider,self).__init__(resource)
+    self.assert_parameter_is_set('hdfs_site')
+    
+    self.webhdfs_enabled = self.resource.hdfs_site['dfs.webhdfs.enabled']
+    self.is_https_enabled = self.resource.hdfs_site['dfs.https.enabled'] if not is_empty(self.resource.hdfs_site['dfs.https.enabled']) else False
+    self.https_nn_address = namenode_ha_utils.get_property_for_active_namenode(self.resource.hdfs_site, 'dfs.namenode.https-address')
+    self.http_nn_address = namenode_ha_utils.get_property_for_active_namenode(self.resource.hdfs_site, 'dfs.namenode.http-address')
+    
+  def action_delayed(self, action_name):
+    self.assert_parameter_is_set('type')
+
+    self.get_hdfs_resource_executor().action_delayed(action_name, self)
+
+  def action_create_on_execute(self):
+    self.action_delayed("create")
+
+  def action_delete_on_execute(self):
+    self.action_delayed("delete")
+
+  def action_execute(self):
+    self.get_hdfs_resource_executor().action_execute(self)
+
+  def get_hdfs_resource_executor(self):
+    # only hdfs seems to support webHDFS
+    if self.webhdfs_enabled and self.resource.default_fs.startswith("hdfs"):
+      return HdfsResourceWebHDFS()
+    else:
+      return HdfsResourceJar()
+  
+  def assert_parameter_is_set(self, parameter_name):
+    if not getattr(self.resource, parameter_name):
+      raise Fail("Resource parameter '{0}' is not set.".format(parameter_name))
+    return True
+  
+  def kinit(self):
+    keytab_file = self.resource.keytab
+    kinit_path = self.resource.kinit_path_local
+    principal_name = self.resource.principal_name
+    user = self.resource.user
+    
+    Execute(format("{kinit_path} -kt {keytab_file} {principal_name}"),
+            user=user
+    )    
+

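To make the WebHDFS code path easier to follow, here is a rough sketch of the command WebHDFSUtil.run_command composes for a directory create (the NameNode address and user are illustrative assumptions; in practice they come from the active NameNode's http/https address and the resource's 'user' argument):

    # run_command("/tmp/some_dir", 'MKDIRS', method='PUT') effectively executes:
    cmd = ["curl", "-L", "-w", "%{http_code}", "-X", "PUT",
           "http://nn1.example.com:50070/webhdfs/v1/tmp/some_dir?op=MKDIRS&user.name=hdfs"]
    # With security_enabled it runs kinit first and appends: "--negotiate", "-u", ":"
    # The last three characters of the output are the HTTP status code; the rest is the JSON
    # body (e.g. '{"boolean":true}'), which is what assertable_result checks.
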
http://git-wip-us.apache.org/repos/asf/ambari/blob/aa51bd75/ambari-common/src/main/python/resource_management/libraries/resources/hdfs_resource.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/resources/hdfs_resource.py b/ambari-common/src/main/python/resource_management/libraries/resources/hdfs_resource.py
index 970e1e9..3a991b0 100644
--- a/ambari-common/src/main/python/resource_management/libraries/resources/hdfs_resource.py
+++ b/ambari-common/src/main/python/resource_management/libraries/resources/hdfs_resource.py
@@ -65,11 +65,16 @@ class HdfsResource(Resource):
   change_permissions_for_parents = BooleanArgument(default=False)
 
   security_enabled = BooleanArgument(default=False)
+  principal_name = ResourceArgument()
   keytab = ResourceArgument()
   kinit_path_local = ResourceArgument()
   user = ResourceArgument()
   hadoop_bin_dir = ResourceArgument()
   hadoop_conf_dir = ResourceArgument()
+  
+  # WebHDFS needs these
+  hdfs_site = ResourceArgument()
+  default_fs = ResourceArgument()
 
   #action 'execute' immediately creates all pending files/directories in efficient manner
   #action 'create_delayed/delete_delayed' adds file/directory to list of pending directories

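A sketch of a full HdfsResource declaration with the new WebHDFS-related arguments (paths, principal and filesystem names are illustrative assumptions, and hdfs_site stands for the hdfs-site configuration dictionary; in the stack scripts these values come from the functools.partial wrappers in the params.py hunks below):

    from resource_management.libraries.resources.hdfs_resource import HdfsResource

    HdfsResource("/user/ambari-qa",
                 type="directory",
                 action="create_on_execute",
                 owner="ambari-qa",
                 mode=0770,
                 user="hdfs",
                 security_enabled=True,
                 keytab="/etc/security/keytabs/hdfs.headless.keytab",
                 kinit_path_local="/usr/bin/kinit",
                 principal_name="hdfs@EXAMPLE.COM",   # new: used for kinit before the WebHDFS (curl) calls
                 hadoop_bin_dir="/usr/hdp/current/hadoop-client/bin",
                 hadoop_conf_dir="/etc/hadoop/conf",
                 hdfs_site=hdfs_site,                  # new: lets the provider find the active NameNode
                 default_fs="hdfs://hacluster",        # new: WebHDFS is only used when this starts with "hdfs"
    )
    HdfsResource(None, action="execute")  # applies all pending create/delete operations
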
http://git-wip-us.apache.org/repos/asf/ambari/blob/aa51bd75/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog210.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog210.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog210.java
index 69c82ef..bdfbb1a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog210.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog210.java
@@ -879,6 +879,40 @@ public class UpgradeCatalog210 extends AbstractUpgradeCatalog {
 
   protected void addMissingConfigs() throws AmbariException {
     updateHiveConfigs();
+    updateHdfsConfigs();
+  }
+  
+  protected void updateHdfsConfigs() throws AmbariException {
+    /***
+     * Append -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 to HADOOP_NAMENODE_OPTS in hadoop-env.sh.
+     */
+    AmbariManagementController ambariManagementController = injector.getInstance(
+        AmbariManagementController.class);
+    Clusters clusters = ambariManagementController.getClusters();
+
+    if (clusters != null) {
+      Map<String, Cluster> clusterMap = clusters.getClusters();
+      Map<String, String> prop = new HashMap<String, String>();
+      String content = null;
+
+      if (clusterMap != null && !clusterMap.isEmpty()) {
+        for (final Cluster cluster : clusterMap.values()) {
+          content = null;
+          if (cluster.getDesiredConfigByType("hadoop-env") != null) {
+            content = cluster.getDesiredConfigByType(
+                "hadoop-env").getProperties().get("content");
+          }
+
+          if (content != null) {
+            content += "\nexport HADOOP_NAMENODE_OPTS=\"${HADOOP_NAMENODE_OPTS} -Dorg.mortbay.jetty.Request.maxFormContentSize=-1\"";
+
+            prop.put("content", content);
+            updateConfigurationPropertiesForCluster(cluster, "hadoop-env",
+                prop, true, false);
+          }
+        }
+      }
+    }
   }
 
   protected void updateHiveConfigs() throws AmbariException {

http://git-wip-us.apache.org/repos/asf/ambari/blob/aa51bd75/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
index 205d012..76f1581 100644
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
@@ -25,6 +25,7 @@ from resource_management.libraries.functions.default import default
 from resource_management.libraries.script.script import Script
 
 
+
 import status_params
 
 # server configurations
@@ -148,6 +149,12 @@ hostname = status_params.hostname
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+
+
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+# dfs.namenode.https-address
 import functools
 #create partial functions with common arguments for every HdfsResource call
 #to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
@@ -158,5 +165,8 @@ HdfsResource = functools.partial(
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,
   hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
 )

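With the partial above, service code only supplies the per-resource arguments; the shared ones (user, keytab, principal_name, hdfs_site, default_fs, ...) are filled in automatically. A minimal sketch, with an illustrative path and mode:

    import params

    params.HdfsResource("/apps/accumulo/data",
                        type="directory",
                        action="create_on_execute",
                        owner=params.hdfs_user,
                        mode=0700)
    params.HdfsResource(None, action="execute")  # triggers the actual WebHDFS (or fast-hdfs-resource.jar) calls
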
http://git-wip-us.apache.org/repos/asf/ambari/blob/aa51bd75/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
index 4a63e3c..22fb894 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
@@ -23,6 +23,7 @@ from resource_management import *
 import status_params
 from ambari_commons import OSCheck
 
+
 if OSCheck.is_windows_family():
   from params_windows import *
 else:
@@ -184,6 +185,11 @@ hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
 kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
 
+
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
 import functools
 #create partial functions with common arguments for every HdfsResource call
 #to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
@@ -194,7 +200,10 @@ HdfsResource = functools.partial(
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,
   hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
  )
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/aa51bd75/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py
index 031f5ab..b9b248d 100644
--- a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py
@@ -27,6 +27,7 @@ from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.script.script import Script
 
 
+
 config = Script.get_config()
 
 stack_name = default("/hostLevelParams/stack_name", None)
@@ -106,6 +107,11 @@ supports_hive_dr = config['configurations']['falcon-env']['supports_hive_dr']
 local_data_mirroring_dir = "/usr/hdp/current/falcon-server/data-mirroring"
 dfs_data_mirroring_dir = "/apps/data-mirroring"
 
+
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
 import functools
 #create partial functions with common arguments for every HdfsResource call
 #to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
@@ -116,6 +122,9 @@ HdfsResource = functools.partial(
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,
   hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
  )
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/aa51bd75/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
index 01f082b..1f79f87 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
@@ -34,6 +34,7 @@ from resource_management.libraries.functions import is_empty
 from resource_management.libraries.functions import get_unique_id_and_date
 from resource_management.libraries.script.script import Script
 
+
 from resource_management.libraries.functions.substitute_vars import substitute_vars
 
 # server configurations
@@ -172,6 +173,11 @@ hostname = config["hostname"]
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+
+
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
 import functools
 #create partial functions with common arguments for every HdfsResource call
 #to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
@@ -182,7 +188,10 @@ HdfsResource = functools.partial(
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,
   hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
 )
 
 # ranger host

http://git-wip-us.apache.org/repos/asf/ambari/blob/aa51bd75/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
index 3cff24c..52a544f 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
@@ -194,13 +194,13 @@ export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
 # Command specific options appended to HADOOP_OPTS when specified
 
 {% if java_version &lt; 8 %}
-export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT -Dorg.mortbay.jetty.Request.maxFormContentSize=-1  ${HADOOP_NAMENODE_OPTS}"
 export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
 
 # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
 export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS"
 {% else %}
-export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
 export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
 
 # The following applies to multiple commands (fs, dfs, fsck, distcp etc)

http://git-wip-us.apache.org/repos/asf/ambari/blob/aa51bd75/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
index 3900967..923d9df 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
@@ -166,13 +166,15 @@ def create_hdfs_directories(check):
                        type="directory",
                        action="create_on_execute",
                        owner=params.hdfs_user,
-                       mode=0777
+                       mode=0777,
+                       only_if=check
   )
   params.HdfsResource(params.smoke_hdfs_user_dir,
                        type="directory",
                        action="create_on_execute",
                        owner=params.smoke_user,
-                       mode=params.smoke_hdfs_user_mode
+                       mode=params.smoke_hdfs_user_mode,
+                       only_if=check
   )
   params.HdfsResource(None, 
                       action="execute",

http://git-wip-us.apache.org/repos/asf/ambari/blob/aa51bd75/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_ha_state.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_ha_state.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_ha_state.py
index 8642ba5..50cfc0c 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_ha_state.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_ha_state.py
@@ -20,7 +20,7 @@ limitations under the License.
 
 from resource_management.core.logger import Logger
 from resource_management.libraries.functions.default import default
-from utils import get_value_from_jmx
+from resource_management.libraries.functions.jmx import get_value_from_jmx
 
 
 class NAMENODE_STATE:

http://git-wip-us.apache.org/repos/asf/ambari/blob/aa51bd75/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
index b29eb8e..03ca213 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
@@ -37,6 +37,7 @@ from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions.format_jvm_option import format_jvm_option
 from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
 
+
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
@@ -292,6 +293,10 @@ else:
   dn_kinit_cmd = ""
   nn_kinit_cmd = ""
   jn_kinit_cmd = ""
+  
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
 
 import functools
 #create partial functions with common arguments for every HdfsResource call
@@ -303,7 +308,10 @@ HdfsResource = functools.partial(
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,
   hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
 )
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/aa51bd75/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
index 21976f4..38270e8 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
@@ -256,17 +256,6 @@ def service(action=None, name=None, user=None, options="", create_pid_dir=False,
          action="delete",
     )
 
-
-def get_value_from_jmx(qry, property):
-  try:
-    response = urllib2.urlopen(qry)
-    data = response.read()
-    if data:
-      data_dict = json.loads(data)
-      return data_dict["beans"][0][property]
-  except:
-    return None
-
 def get_jmx_data(nn_address, modeler_type, metric, encrypted=False):
   """
   :param nn_address: Namenode Address, e.g., host:port, ** MAY ** be preceded with "http://" or "https://" already.

http://git-wip-us.apache.org/repos/asf/ambari/blob/aa51bd75/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
index 647892a..d180340 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
@@ -36,6 +36,7 @@ from resource_management.libraries.functions.get_port_from_url import get_port_f
 from resource_management.libraries import functions
 
 
+
 # server configurations
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
@@ -368,6 +369,11 @@ webhcat_hdfs_user_mode = 0755
 #for create_hdfs_directory
 security_param = "true" if security_enabled else "false"
 
+
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
 import functools
 #create partial functions with common arguments for every HdfsResource call
 #to create hdfs directory we need to call params.HdfsResource in code
@@ -378,7 +384,10 @@ HdfsResource = functools.partial(
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,
   hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
  )
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/aa51bd75/ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/params.py b/ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/params.py
index 54f0ba0..bef1e29 100644
--- a/ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/params.py
@@ -27,6 +27,7 @@ from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.script.script import Script
 
 
+
 # server configurations
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
@@ -68,6 +69,11 @@ java64_home = config['hostLevelParams']['java_home']
 
 log4j_props = config['configurations']['mahout-log4j']['content']
 
+
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
 import functools
 #create partial functions with common arguments for every HdfsResource call
 #to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
@@ -78,5 +84,8 @@ HdfsResource = functools.partial(
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,
   hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
 )

http://git-wip-us.apache.org/repos/asf/ambari/blob/aa51bd75/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
index bc168c6..aef90ee 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
@@ -193,9 +193,12 @@ oozie_hdfs_user_mode = 0775
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
 import functools
 #create partial functions with common arguments for every HdfsResource call
-#to create hdfs directory we need to call params.HdfsResource in code
+#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
 HdfsResource = functools.partial(
   HdfsResource,
   user=hdfs_user,
@@ -203,9 +206,11 @@ HdfsResource = functools.partial(
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,
   hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir
- )
-
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
+)
 
 # The logic for LZO also exists in HDFS' params.py
 io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)

http://git-wip-us.apache.org/repos/asf/ambari/blob/aa51bd75/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params_linux.py
index d7ff0ec..075aa7b 100644
--- a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params_linux.py
@@ -69,6 +69,11 @@ pig_properties = config['configurations']['pig-properties']['content']
 
 log4j_props = config['configurations']['pig-log4j']['content']
 
+
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
 import functools
 #create partial functions with common arguments for every HdfsResource call
 #to create hdfs directory we need to call params.HdfsResource in code
@@ -79,6 +84,9 @@ HdfsResource = functools.partial(
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,
   hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
  )
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/aa51bd75/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/service_check.py
index b090a04..d52b787 100644
--- a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/service_check.py
@@ -47,13 +47,13 @@ class PigServiceCheckLinux(PigServiceCheck):
     params.HdfsResource(output_dir,
                         type="directory",
                         action="delete_on_execute",
-                        user=params.smokeuser,
+                        owner=params.smokeuser,
                         )
     params.HdfsResource(input_file,
                         type="file",
                         source="/etc/passwd",
                         action="create_on_execute",
-                        user=params.smokeuser,
+                        owner=params.smokeuser,
     )
     params.HdfsResource(None, action="execute")
  
@@ -85,13 +85,13 @@ class PigServiceCheckLinux(PigServiceCheck):
       params.HdfsResource(output_dir,
                           type="directory",
                           action="delete_on_execute",
-                          user=params.smokeuser,
+                          owner=params.smokeuser,
       )
       params.HdfsResource(input_file,
                           type="file",
                           source="/etc/passwd",
                           action="create_on_execute",
-                          user=params.smokeuser,
+                          owner=params.smokeuser,
       )
 
       # Check for Pig-on-Tez

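In the Pig service check the keyword changes from user= to owner=: the smoke user now only marks ownership of the HDFS paths, while the operation itself runs as the hdfs user pre-bound in the partial, and the *_on_execute actions are only carried out once the trailing HdfsResource(None, action="execute") call runs. A sketch of the resulting call sequence follows; the concrete paths are hypothetical, and params.HdfsResource is the partial defined in params_linux.py above.

    # Queue the operations; nothing touches HDFS yet.
    params.HdfsResource('/user/ambari-qa/pigsmoke.out',   # hypothetical output_dir
                        type='directory',
                        action='delete_on_execute',
                        owner=params.smokeuser)
    params.HdfsResource('/user/ambari-qa/passwd',         # hypothetical input_file
                        type='file',
                        source='/etc/passwd',
                        action='create_on_execute',
                        owner=params.smokeuser)

    # Carry out everything queued above in a single pass.
    params.HdfsResource(None, action='execute')
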
http://git-wip-us.apache.org/repos/asf/ambari/blob/aa51bd75/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/params.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/params.py
index 348f49d..a197a4d 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/params.py
@@ -30,6 +30,7 @@ from resource_management.libraries.functions import format
 from resource_management.libraries.functions.version import format_hdp_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions import get_kinit_path
+
 from resource_management.libraries.script.script import Script
 
 
@@ -152,7 +153,8 @@ if security_enabled:
       'hive.server2.enable.doAs': str(config['configurations']['hive-site']['hive.server2.enable.doAs']).lower()
     })
   
-
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+hdfs_site = config['configurations']['hdfs-site']
 
 
 import functools
@@ -165,5 +167,8 @@ HdfsResource = functools.partial(
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,
   hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/aa51bd75/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_linux.py
index ab29a95..97c9996 100644
--- a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_linux.py
@@ -27,6 +27,7 @@ from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.script.script import Script
 
 
+
 # server configurations
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
@@ -73,6 +74,12 @@ tez_user = config['configurations']['tez-env']['tez_user']
 user_group = config['configurations']['cluster-env']['user_group']
 tez_env_sh_template = config['configurations']['tez-env']['content']
 
+
+
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
 import functools
 #create partial functions with common arguments for every HdfsResource call
 #to create/delete/copyfromlocal hdfs directories/files we need to call params.HdfsResource in code
@@ -83,7 +90,10 @@ HdfsResource = functools.partial(
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,
   hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
 )
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/aa51bd75/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
index 2198bcc..564f8ef 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
@@ -29,6 +29,7 @@ from resource_management.libraries.functions.version import format_hdp_stack_ver
 from resource_management.libraries.functions.default import default
 from resource_management.libraries import functions
 
+
 import status_params
 
 # a map of the Ambari role to the component name
@@ -223,6 +224,12 @@ tez_lib_uris = default("/configurations/tez-site/tez.lib.uris", None)
 #for create_hdfs_directory
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+
+
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
 import functools
 #create partial functions with common arguments for every HdfsResource call
 #to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
@@ -233,7 +240,10 @@ HdfsResource = functools.partial(
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,
   hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
  )
 update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/aa51bd75/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hadoop-env.xml
index 21b92a2..31dadeb 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hadoop-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hadoop-env.xml
@@ -79,14 +79,14 @@ HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC
 HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
 
 {% if java_version &lt; 8 %}
-export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
 export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}"
 
 # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
 export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS"
 
 {% else %}
-export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS} -Dorg.mortbay.jetty.Request.maxFormContentSize=-1"
 export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}"
 
 # The following applies to multiple commands (fs, dfs, fsck, distcp etc)

http://git-wip-us.apache.org/repos/asf/ambari/blob/aa51bd75/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hadoop-env.xml
index 0068e3e..7133c58 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hadoop-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/HDFS/configuration/hadoop-env.xml
@@ -62,7 +62,7 @@ HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logge
 
 {% if java_version &lt; 8 %}
 SHARED_HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT"
-export HADOOP_NAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\" ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_NAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
 export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}"
 
 export HADOOP_SECONDARYNAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\" ${HADOOP_SECONDARYNAMENODE_OPTS}"
@@ -72,7 +72,7 @@ export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_
 
 {% else %}
 SHARED_HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT"
-export HADOOP_NAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\" ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_NAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
 export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}"
 
 export HADOOP_SECONDARYNAMENODE_OPTS="${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\" ${HADOOP_SECONDARYNAMENODE_OPTS}"

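Both hadoop-env.xml templates add -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 to HADOOP_NAMENODE_OPTS, which lifts Jetty's default form-content limit on the NameNode's embedded HTTP server; a plausible reading is that this keeps larger requests from being rejected now that HdfsResource goes through WebHDFS instead of hadoop fs jar calls. For orientation only: a WebHDFS operation such as MKDIRS is a plain HTTP PUT against the NameNode HTTP port. The sketch below uses the Python standard library, a hypothetical NameNode address, and no Kerberos/SPNEGO handling; it is not the code from this patch.

    import json
    import urllib2

    def webhdfs_mkdirs(path, user='hdfs', nn_http='c6401.ambari.apache.org:50070'):
        # nn_http is a hypothetical NameNode HTTP address (dfs.namenode.http-address).
        url = 'http://%s/webhdfs/v1%s?op=MKDIRS&user.name=%s' % (nn_http, path, user)
        request = urllib2.Request(url, data='')
        request.get_method = lambda: 'PUT'      # WebHDFS MKDIRS is an HTTP PUT
        response = urllib2.urlopen(request)
        return json.loads(response.read())      # e.g. {"boolean": true}

    # webhdfs_mkdirs('/tmp/example_dir')
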
http://git-wip-us.apache.org/repos/asf/ambari/blob/aa51bd75/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
index cc404eb..0c87a80 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
@@ -291,7 +291,7 @@ class TestHBaseMaster(RMFTestCase):
         owner = 'hbase',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
     )
     self.assertResourceCalled('HdfsResource', '/apps/hbase/staging',
         security_enabled = False,
@@ -303,7 +303,7 @@ class TestHBaseMaster(RMFTestCase):
         owner = 'hbase',
         hadoop_bin_dir = '/usr/bin',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0711,
     )
     self.assertResourceCalled('HdfsResource', None,
@@ -313,7 +313,7 @@ class TestHBaseMaster(RMFTestCase):
         
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
-        action = ['execute'],
+        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
 
@@ -417,7 +417,7 @@ class TestHBaseMaster(RMFTestCase):
         owner = 'hbase',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
     )
     self.assertResourceCalled('HdfsResource', '/apps/hbase/staging',
         security_enabled = True,
@@ -429,7 +429,7 @@ class TestHBaseMaster(RMFTestCase):
         owner = 'hbase',
         hadoop_bin_dir = '/usr/bin',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0711,
     )
     self.assertResourceCalled('HdfsResource', None,
@@ -439,7 +439,7 @@ class TestHBaseMaster(RMFTestCase):
         
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
-        action = ['execute'],
+        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
 
@@ -543,7 +543,10 @@ class TestHBaseMaster(RMFTestCase):
         security_enabled = False,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),
+        default_fs = 'hdfs://nn1',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = UnknownConfigurationMock(),
         user = 'hdfs',
         owner = 'hbase',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
@@ -554,7 +557,10 @@ class TestHBaseMaster(RMFTestCase):
         security_enabled = False,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),
+        default_fs = 'hdfs://nn1',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = UnknownConfigurationMock(),
         user = 'hdfs',
         owner = 'hbase',
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
@@ -566,7 +572,10 @@ class TestHBaseMaster(RMFTestCase):
         security_enabled = False,
         hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
         keytab = UnknownConfigurationMock(),
+        default_fs = 'hdfs://nn1',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = UnknownConfigurationMock(),
         user = 'hdfs',
         action = ['execute'],
         hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',

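The HBase tests above (and the NameNode tests that follow) now expect every HdfsResource to receive hdfs_site, principal_name and default_fs, and the HA scenarios add an only_if guard that greps for the active NameNode before running. Handing the provider the whole hdfs-site dictionary plus fs.defaultFS is enough for it to locate the WebHDFS endpoint on its own; the sketch below shows one way such a lookup could work using the standard dfs.namenode.http-address* keys. It is an illustration, not the provider's actual logic.

    def webhdfs_addresses(hdfs_site, default_fs):
        # Best-effort lookup of candidate NameNode HTTP addresses (sketch only).
        authority = default_fs.replace('hdfs://', '').split(':')[0]   # e.g. 'ns1' or a plain hostname
        ha_nn_ids = hdfs_site.get('dfs.ha.namenodes.%s' % authority)
        if ha_nn_ids:
            # HA: one http-address per NameNode id; the caller still has to pick the active one.
            return [hdfs_site['dfs.namenode.http-address.%s.%s' % (authority, nn_id.strip())]
                    for nn_id in ha_nn_ids.split(',')]
        # Non-HA: a single http-address entry.
        return [hdfs_site['dfs.namenode.http-address']]
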
http://git-wip-us.apache.org/repos/asf/ambari/blob/aa51bd75/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
index c57287b..b7126fd 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
@@ -102,9 +102,13 @@ class TestNamenode(RMFTestCase):
                               )
     self.assertResourceCalled('HdfsResource', '/tmp',
         security_enabled = False,
-        hadoop_bin_dir = '/usr/bin',
+        only_if = None,
         keytab = UnknownConfigurationMock(),
+        hadoop_bin_dir = '/usr/bin',
+        default_fs = 'wasb://abc@c6401.ambari.apache.org',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         owner = 'hdfs',
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -114,9 +118,13 @@ class TestNamenode(RMFTestCase):
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
         security_enabled = False,
-        hadoop_bin_dir = '/usr/bin',
+        only_if = None,
         keytab = UnknownConfigurationMock(),
+        hadoop_bin_dir = '/usr/bin',
+        default_fs = 'wasb://abc@c6401.ambari.apache.org',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -129,13 +137,15 @@ class TestNamenode(RMFTestCase):
         only_if = None,
         keytab = UnknownConfigurationMock(),
         hadoop_bin_dir = '/usr/bin',
+        default_fs = 'wasb://abc@c6401.ambari.apache.org',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         action = ['execute'],
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
     self.assertNoMoreResources()
-    pass
 
   def test_install_default(self):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
@@ -209,9 +219,13 @@ class TestNamenode(RMFTestCase):
     )
     self.assertResourceCalled('HdfsResource', '/tmp',
         security_enabled = False,
-        hadoop_bin_dir = '/usr/bin',
+        only_if = None,
         keytab = UnknownConfigurationMock(),
+        hadoop_bin_dir = '/usr/bin',
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         owner = 'hdfs',
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -221,9 +235,13 @@ class TestNamenode(RMFTestCase):
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
         security_enabled = False,
-        hadoop_bin_dir = '/usr/bin',
+        only_if = None,
         keytab = UnknownConfigurationMock(),
+        hadoop_bin_dir = '/usr/bin',
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -236,7 +254,10 @@ class TestNamenode(RMFTestCase):
         only_if = None,
         keytab = UnknownConfigurationMock(),
         hadoop_bin_dir = '/usr/bin',
+        default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         action = ['execute'],
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -345,8 +366,9 @@ class TestNamenode(RMFTestCase):
         owner = 'hdfs',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0777,
+        only_if = None,
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
         security_enabled = True,
@@ -357,8 +379,9 @@ class TestNamenode(RMFTestCase):
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
         type = 'directory',
-        action = ['create_on_execute'],
+        action = ['create_on_execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         mode = 0770,
+        only_if = None,
     )
     self.assertResourceCalled('HdfsResource', None,
         security_enabled = True,
@@ -367,7 +390,7 @@ class TestNamenode(RMFTestCase):
         hadoop_bin_dir = '/usr/bin',
         kinit_path_local = '/usr/bin/kinit',
         user = 'hdfs',
-        action = ['execute'],
+        action = ['execute'], hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
         hadoop_conf_dir = '/etc/hadoop/conf',
     )
     self.assertNoMoreResources()
@@ -437,9 +460,13 @@ class TestNamenode(RMFTestCase):
     )
     self.assertResourceCalled('HdfsResource', '/tmp',
         security_enabled = False,
-        hadoop_bin_dir = '/usr/bin',
+        only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
         keytab = UnknownConfigurationMock(),
+        hadoop_bin_dir = '/usr/bin',
+        default_fs = 'hdfs://ns1',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         owner = 'hdfs',
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -449,9 +476,13 @@ class TestNamenode(RMFTestCase):
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
         security_enabled = False,
-        hadoop_bin_dir = '/usr/bin',
+        only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
         keytab = UnknownConfigurationMock(),
+        hadoop_bin_dir = '/usr/bin',
+        default_fs = 'hdfs://ns1',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -464,7 +495,10 @@ class TestNamenode(RMFTestCase):
         only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
         keytab = UnknownConfigurationMock(),
         hadoop_bin_dir = '/usr/bin',
+        default_fs = 'hdfs://ns1',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         action = ['execute'],
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -518,9 +552,13 @@ class TestNamenode(RMFTestCase):
     )
     self.assertResourceCalled('HdfsResource', '/tmp',
         security_enabled = True,
-        hadoop_bin_dir = '/usr/bin',
+        only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
+        hadoop_bin_dir = '/usr/bin',
+        default_fs = 'hdfs://ns1',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = 'hdfs',
         user = 'hdfs',
         owner = 'hdfs',
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -530,9 +568,13 @@ class TestNamenode(RMFTestCase):
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
         security_enabled = True,
-        hadoop_bin_dir = '/usr/bin',
+        only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
+        hadoop_bin_dir = '/usr/bin',
+        default_fs = 'hdfs://ns1',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = 'hdfs',
         user = 'hdfs',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -545,7 +587,10 @@ class TestNamenode(RMFTestCase):
         only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
         keytab = '/etc/security/keytabs/hdfs.headless.keytab',
         hadoop_bin_dir = '/usr/bin',
+        default_fs = 'hdfs://ns1',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = 'hdfs',
         user = 'hdfs',
         action = ['execute'],
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -608,9 +653,13 @@ class TestNamenode(RMFTestCase):
                               )
     self.assertResourceCalled('HdfsResource', '/tmp',
         security_enabled = False,
-        hadoop_bin_dir = '/usr/bin',
+        only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
         keytab = UnknownConfigurationMock(),
+        hadoop_bin_dir = '/usr/bin',
+        default_fs = 'hdfs://ns1',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         owner = 'hdfs',
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -620,9 +669,13 @@ class TestNamenode(RMFTestCase):
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
         security_enabled = False,
-        hadoop_bin_dir = '/usr/bin',
+        only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
         keytab = UnknownConfigurationMock(),
+        hadoop_bin_dir = '/usr/bin',
+        default_fs = 'hdfs://ns1',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -635,7 +688,10 @@ class TestNamenode(RMFTestCase):
         only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active'",
         keytab = UnknownConfigurationMock(),
         hadoop_bin_dir = '/usr/bin',
+        default_fs = 'hdfs://ns1',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         action = ['execute'],
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -695,9 +751,13 @@ class TestNamenode(RMFTestCase):
                               )
     self.assertResourceCalled('HdfsResource', '/tmp',
         security_enabled = False,
-        hadoop_bin_dir = '/usr/bin',
+        only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn2 | grep active'",
         keytab = UnknownConfigurationMock(),
+        hadoop_bin_dir = '/usr/bin',
+        default_fs = 'hdfs://ns1',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         owner = 'hdfs',
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -707,9 +767,13 @@ class TestNamenode(RMFTestCase):
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa',
         security_enabled = False,
-        hadoop_bin_dir = '/usr/bin',
+        only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn2 | grep active'",
         keytab = UnknownConfigurationMock(),
+        hadoop_bin_dir = '/usr/bin',
+        default_fs = 'hdfs://ns1',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         owner = 'ambari-qa',
         hadoop_conf_dir = '/etc/hadoop/conf',
@@ -722,7 +786,10 @@ class TestNamenode(RMFTestCase):
         only_if = "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn2 | grep active'",
         keytab = UnknownConfigurationMock(),
         hadoop_bin_dir = '/usr/bin',
+        default_fs = 'hdfs://ns1',
+        hdfs_site = self.getConfig()['configurations']['hdfs-site'],
         kinit_path_local = '/usr/bin/kinit',
+        principal_name = None,
         user = 'hdfs',
         action = ['execute'],
         hadoop_conf_dir = '/etc/hadoop/conf',