Posted to commits@ambari.apache.org by ao...@apache.org on 2015/04/23 14:24:59 UTC

ambari git commit: AMBARI-10692. Hdfs Ranger plugin fails to install with non-root agent (aonishuk)

Repository: ambari
Updated Branches:
  refs/heads/trunk d3aada22d -> 7e44537bd


AMBARI-10692. Hdfs Ranger plugin fails to install with non-root agent (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7e44537b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7e44537b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7e44537b

Branch: refs/heads/trunk
Commit: 7e44537bd9e7d84cb458d20bbf5e948f545dd1f3
Parents: d3aada2
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Thu Apr 23 15:24:48 2015 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Thu Apr 23 15:24:48 2015 +0300

----------------------------------------------------------------------
 .../0.96.0.2.0/package/scripts/params_linux.py  |   4 +-
 .../2.1.0.2.0/package/scripts/params_linux.py   | 119 +++---
 .../package/scripts/setup_ranger_hdfs.py        | 205 ++---------
 .../ranger-hdfs-plugin-properties.xml           | 361 +++++++++++--------
 .../stacks/2.0.6/configs/altfs_plus_hdfs.json   |  29 ++
 .../stacks/2.0.6/configs/client-upgrade.json    |  33 +-
 .../python/stacks/2.0.6/configs/default.json    |  29 ++
 .../2.0.6/configs/default_no_install.json       |  29 ++
 .../default_update_exclude_file_only.json       |  29 ++
 .../2.0.6/configs/ha_bootstrap_active_node.json |  29 ++
 .../configs/ha_bootstrap_standby_node.json      |  29 ++
 .../python/stacks/2.0.6/configs/ha_default.json |  29 ++
 .../python/stacks/2.0.6/configs/ha_secured.json |  29 ++
 .../python/stacks/2.0.6/configs/nn_ru_lzo.json  |  29 ++
 .../2.0.6/configs/rebalancehdfs_default.json    |  29 ++
 .../2.0.6/configs/rebalancehdfs_secured.json    |  29 ++
 .../python/stacks/2.0.6/configs/secured.json    |  29 ++
 .../src/test/python/stacks/utils/RMFTestCase.py |  33 +-
 18 files changed, 685 insertions(+), 418 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7e44537b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
index eb903f4..6a3a1a4 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
@@ -205,8 +205,8 @@ hbase_zookeeper_property_clientPort = config['configurations']['hbase-site']['hb
 hbase_security_authentication = config['configurations']['hbase-site']['hbase.security.authentication']
 hadoop_security_authentication = config['configurations']['core-site']['hadoop.security.authentication']
 
-repo_config_username = default("/configurations/ranger-hbase-plugin-properties/REPOSITORY_CONFIG_USERNAME", "hbase")
-repo_config_password = default("/configurations/ranger-hbase-plugin-properties/REPOSITORY_CONFIG_PASSWORD", "hbase")
+repo_config_username = config['configurations']['ranger-hbase-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
+repo_config_password = config['configurations']['ranger-hbase-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']
 
 admin_uname = config['configurations']['ranger-env']['admin_username']
 admin_password = config['configurations']['ranger-env']['admin_password']
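
Note on the change above: default() (from resource_management.libraries.functions.default)
returns its fallback when the /-separated key path is absent, while direct dictionary
access raises KeyError, so the new code hard-requires the property to be supplied by the
stack definition. A minimal sketch of the distinction (illustrative only, not the
library's implementation — the real default() reads the command JSON itself):

    def default(path, fallback, config):
        # Walk the /-separated path; any missing key yields the fallback, never an error.
        node = config
        for part in path.strip('/').split('/'):
            if not isinstance(node, dict) or part not in node:
                return fallback
            node = node[part]
        return node

    config = {'configurations': {'ranger-hbase-plugin-properties': {}}}

    # Old style: silently falls back to "hbase" when the property is missing.
    print(default("/configurations/ranger-hbase-plugin-properties/REPOSITORY_CONFIG_USERNAME",
                  "hbase", config))                             # -> hbase

    # New style: the same missing property is now a hard error.
    try:
        config['configurations']['ranger-hbase-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
    except KeyError:
        print('property must now be supplied by the stack definition')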

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e44537b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
index 4e0bfed..0747b18 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
@@ -23,6 +23,7 @@ from resource_management.libraries.functions.default import default
 from resource_management import *
 import status_params
 import utils
+import json
 import os
 import itertools
 import re
@@ -335,77 +336,79 @@ if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
 ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
 
 #ranger hdfs properties
-policymgr_mgr_url = default("/configurations/admin-properties/policymgr_external_url", "http://localhost:6080")
-sql_connector_jar = default("/configurations/admin-properties/SQL_CONNECTOR_JAR", "/usr/share/java/mysql-connector-java.jar")
-xa_audit_db_flavor = default("/configurations/admin-properties/DB_FLAVOR", "MYSQL")
-xa_audit_db_name = default("/configurations/admin-properties/audit_db_name", "ranger_audit")
-xa_audit_db_user = default("/configurations/admin-properties/audit_db_user", "rangerlogger")
-xa_audit_db_password = default("/configurations/admin-properties/audit_db_password", "rangerlogger")
-xa_db_host = default("/configurations/admin-properties/db_host", "localhost")
-repo_name = str(config['clusterName']) + '_hadoop'
-db_enabled = default("/configurations/ranger-hdfs-plugin-properties/XAAUDIT.DB.IS_ENABLED", "false")
-hdfs_enabled = default("/configurations/ranger-hdfs-plugin-properties/XAAUDIT.HDFS.IS_ENABLED", "false")
-hdfs_dest_dir = default("/configurations/ranger-hdfs-plugin-properties/XAAUDIT.HDFS.DESTINATION_DIRECTORY", "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/app-type/time:yyyyMMdd")
-hdfs_buffer_dir = default("/configurations/ranger-hdfs-plugin-properties/XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY", "__REPLACE__LOG_DIR/hadoop/app-type/audit")
-hdfs_archive_dir = default("/configurations/ranger-hdfs-plugin-properties/XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY", "__REPLACE__LOG_DIR/hadoop/app-type/audit/archive")
-hdfs_dest_file = default("/configurations/ranger-hdfs-plugin-properties/XAAUDIT.HDFS.DESTINTATION_FILE", "hostname-audit.log")
-hdfs_dest_flush_int_sec = default("/configurations/ranger-hdfs-plugin-properties/XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS", "900")
-hdfs_dest_rollover_int_sec = default("/configurations/ranger-hdfs-plugin-properties/XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS", "86400")
-hdfs_dest_open_retry_int_sec = default("/configurations/ranger-hdfs-plugin-properties/XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS", "60")
-hdfs_buffer_file = default("/configurations/ranger-hdfs-plugin-properties/XAAUDIT.HDFS.LOCAL_BUFFER_FILE", "time:yyyyMMdd-HHmm.ss.log")
-hdfs_buffer_flush_int_sec = default("/configurations/ranger-hdfs-plugin-properties/XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS", "60")
-hdfs_buffer_rollover_int_sec = default("/configurations/ranger-hdfs-plugin-properties/XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS", "600")
-hdfs_archive_max_file_count = default("/configurations/ranger-hdfs-plugin-properties/XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT", "10")
-ssl_keystore_file = default("/configurations/ranger-hdfs-plugin-properties/SSL_KEYSTORE_FILE_PATH", "/etc/hadoop/conf/ranger-plugin-keystore.jks")
-ssl_keystore_password = default("/configurations/ranger-hdfs-plugin-properties/SSL_KEYSTORE_PASSWORD", "myKeyFilePassword")
-ssl_truststore_file = default("/configurations/ranger-hdfs-plugin-properties/SSL_TRUSTSTORE_FILE_PATH", "/etc/hadoop/conf/ranger-plugin-truststore.jks")
-ssl_truststore_password = default("/configurations/ranger-hdfs-plugin-properties/SSL_TRUSTSTORE_PASSWORD", "changeit")
+policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
+sql_connector_jar = config['configurations']['admin-properties']['SQL_CONNECTOR_JAR']
+xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR']
+xa_audit_db_name = config['configurations']['admin-properties']['audit_db_name']
+xa_audit_db_user = config['configurations']['admin-properties']['audit_db_user']
+xa_audit_db_password = config['configurations']['admin-properties']['audit_db_password']
+xa_db_host = config['configurations']['admin-properties']['db_host']
+repo_name = str(config['clusterName']) + '_hdfs'
 
 hadoop_security_authentication = config['configurations']['core-site']['hadoop.security.authentication']
 hadoop_security_authorization = config['configurations']['core-site']['hadoop.security.authorization']
 fs_default_name = config['configurations']['core-site']['fs.defaultFS']
 hadoop_security_auth_to_local = config['configurations']['core-site']['hadoop.security.auth_to_local']
-hadoop_rpc_protection = default("/configurations/ranger-hdfs-plugin-properties/hadoop.rpc.protection", "-")
-common_name_for_certificate = default("/configurations/ranger-hdfs-plugin-properties/common.name.for.certificate", "-")
+hadoop_rpc_protection = config['configurations']['ranger-hdfs-plugin-properties']['hadoop.rpc.protection']
+common_name_for_certificate = config['configurations']['ranger-hdfs-plugin-properties']['common.name.for.certificate']
 
-repo_config_username = default("/configurations/ranger-hdfs-plugin-properties/REPOSITORY_CONFIG_USERNAME", "hadoop")
-repo_config_password = default("/configurations/ranger-hdfs-plugin-properties/REPOSITORY_CONFIG_PASSWORD", "hadoop")
+repo_config_username = config['configurations']['ranger-hdfs-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
+repo_config_password = config['configurations']['ranger-hdfs-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']
 
 if security_enabled:
   sn_principal_name = default("/configurations/hdfs-site/dfs.secondary.namenode.kerberos.principal", "nn/_HOST@EXAMPLE.COM")
   sn_principal_name = sn_principal_name.replace('_HOST',hostname.lower())
 
-admin_uname = default("/configurations/ranger-env/admin_username", "admin")
-admin_password = default("/configurations/ranger-env/admin_password", "admin")
-admin_uname_password = format("{admin_uname}:{admin_password}")
+admin_uname = config['configurations']['ranger-env']['admin_username']
+admin_password = config['configurations']['ranger-env']['admin_password']
 
-ambari_ranger_admin = default("/configurations/ranger-env/ranger_admin_username", "amb_ranger_admin")
-ambari_ranger_password = default("/configurations/ranger-env/ranger_admin_password", "ambari123")
-policy_user = default("/configurations/ranger-hdfs-plugin-properties/policy_user", "ambari-qa")
+ambari_ranger_admin = config['configurations']['ranger-env']['ranger_admin_username']
+ambari_ranger_password = config['configurations']['ranger-env']['ranger_admin_password']
+policy_user = config['configurations']['ranger-hdfs-plugin-properties']['policy_user']
 
 #For curl command in ranger plugin to get db connector
 jdk_location = config['hostLevelParams']['jdk_location']
 java_share_dir = '/usr/share/java'
-if xa_audit_db_flavor and xa_audit_db_flavor.lower() == 'mysql':
-  jdbc_symlink_name = "mysql-jdbc-driver.jar"
-  jdbc_jar_name = "mysql-connector-java.jar"
-elif xa_audit_db_flavor and xa_audit_db_flavor.lower() == 'oracle':
-  jdbc_jar_name = "ojdbc6.jar"
-  jdbc_symlink_name = "oracle-jdbc-driver.jar"
-elif xa_audit_db_flavor and xa_audit_db_flavor.lower() == 'postgres':
-  jdbc_jar_name = "postgresql.jar"
-  jdbc_symlink_name = "postgres-jdbc-driver.jar"
-elif xa_audit_db_flavor and xa_audit_db_flavor.lower() == 'sqlserver':
-  jdbc_jar_name = "sqljdbc4.jar"
-  jdbc_symlink_name = "mssql-jdbc-driver.jar"
-
-downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}")
-
-driver_curl_source = format("{jdk_location}/{jdbc_symlink_name}")
-driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")
-
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.3') >= 0:
-  solr_enabled = default("/configurations/ranger-hdfs-plugin-properties/XAAUDIT.SOLR.IS_ENABLED", "false")
-  solr_max_queue_size = default("/configurations/ranger-hdfs-plugin-properties/XAAUDIT.SOLR.MAX_QUEUE_SIZE", "1")
-  solr_max_flush_interval = default("/configurations/ranger-hdfs-plugin-properties/XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS", "1000")
-  solr_url = default("/configurations/ranger-hdfs-plugin-properties/XAAUDIT.SOLR.SOLR_URL", "http://localhost:6083/solr/ranger_audits")
+if has_ranger_admin:
+  if xa_audit_db_flavor.lower() == 'mysql':
+    jdbc_symlink_name = "mysql-jdbc-driver.jar"
+    jdbc_jar_name = "mysql-connector-java.jar"
+  elif xa_audit_db_flavor.lower() == 'oracle':
+    jdbc_jar_name = "ojdbc6.jar"
+    jdbc_symlink_name = "oracle-jdbc-driver.jar"
+  elif xa_audit_db_flavor.lower() == 'postgres':
+    jdbc_jar_name = "postgresql.jar"
+    jdbc_symlink_name = "postgres-jdbc-driver.jar"
+  elif xa_audit_db_flavor.lower() == 'sqlserver':
+    jdbc_jar_name = "sqljdbc4.jar"
+    jdbc_symlink_name = "mssql-jdbc-driver.jar"
+
+  downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}")
+  
+  driver_curl_source = format("{jdk_location}/{jdbc_symlink_name}")
+  driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")
+  
+
+
+hdfs_ranger_plugin_config = {
+  'username': repo_config_username,
+  'password': repo_config_password,
+  'hadoop.security.authentication': hadoop_security_authentication,
+  'hadoop.security.authorization': hadoop_security_authorization,
+  'fs.default.name': fs_default_name,
+  'hadoop.security.auth_to_local': hadoop_security_auth_to_local,
+  'hadoop.rpc.protection': hadoop_rpc_protection,
+  'commonNameForCertificate': common_name_for_certificate,
+  'dfs.datanode.kerberos.principal': dn_principal_name if security_enabled else '',
+  'dfs.namenode.kerberos.principal': nn_principal_name if security_enabled else '',
+  'dfs.secondary.namenode.kerberos.principal': sn_principal_name if security_enabled else ''
+}
+
+hdfs_ranger_plugin_repo = {
+  'isActive': 'true',
+  'config': json.dumps(hdfs_ranger_plugin_config),
+  'description': 'hdfs repo',
+  'name': repo_name,
+  'repositoryType': 'hdfs',
+  'assetType': '1'
+}
\ No newline at end of file
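
Note: the two module-level dicts above replace the removed hdfs_repo_properties() helper;
the payload sent to Ranger Admin is the repo dict with its 'config' value JSON-encoded as
a nested string. A standalone sketch with hypothetical placeholder values (stdlib only):

    import json

    hdfs_ranger_plugin_config = {            # placeholder values, not real credentials
        'username': 'hadoop',
        'password': 'hadoop',
        'hadoop.security.authentication': 'simple',
        'fs.default.name': 'hdfs://nn.example.com:8020',
    }

    hdfs_ranger_plugin_repo = {
        'isActive': 'true',
        'config': json.dumps(hdfs_ranger_plugin_config),  # nested JSON string, as above
        'description': 'hdfs repo',
        'name': 'c1_hdfs',                   # repo_name is "<clusterName>_hdfs"
        'repositoryType': 'hdfs',
        'assetType': '1',
    }

    print(json.dumps(hdfs_ranger_plugin_repo, indent=2))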

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e44537b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
index 8add904..595fc3f 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
@@ -30,189 +30,44 @@ from resource_management.libraries.functions.version import format_hdp_stack_ver
 
 def setup_ranger_hdfs():
   import params
-
+  
   if params.has_ranger_admin:
     File(params.downloaded_custom_connector,
          content = DownloadSource(params.driver_curl_source)
     )
 
-    if not os.path.isfile(params.driver_curl_target):
-      Execute(('cp', '--remove-destination', params.downloaded_custom_connector, params.driver_curl_target),
-              path=["/bin", "/usr/bin/"],
-              sudo=True)
-
-    try:
-      command = 'hdp-select status hadoop-client'
-      return_code, hdp_output = shell.call(command, timeout=20)
-    except Exception, e:
-      Logger.error(str(e))
-      raise Fail('Unable to execute hdp-select command to retrieve the version.')
-
-    if return_code != 0:
-      raise Fail(
-        'Unable to determine the current version because of a non-zero return code of {0}'.format(str(return_code)))
-
-    hdp_version = re.sub('hadoop-client - ', '', hdp_output).strip()
-    match = re.match('[0-9]+.[0-9]+.[0-9]+.[0-9]+-[0-9]+', hdp_version)
-
-    if match is None:
-      raise Fail('Failed to get extracted version')
+    Execute(('cp', '--remove-destination', params.downloaded_custom_connector, params.driver_curl_target),
+            path=["/bin", "/usr/bin/"],
+            not_if=format("test -f {driver_curl_target}"),
+            sudo=True)
 
-    file_path = '/usr/hdp/' + hdp_version + '/ranger-hdfs-plugin/install.properties'
+    hdp_version = get_hdp_version('hadoop-client')
+    file_path = format('/usr/hdp/{hdp_version}/ranger-hdfs-plugin/install.properties')
+    
     if not os.path.isfile(file_path):
-      raise Fail('Ranger HDFS plugin install.properties file does not exist at {0}'.format(file_path))
-
-    ranger_hdfs_dict = ranger_hdfs_properties()
-    hdfs_repo_data = hdfs_repo_properties()
-
-    if os.path.isfile(file_path):
-      write_properties_to_file(file_path, ranger_hdfs_dict)
+      raise Fail(format('Ranger HDFS plugin install.properties file does not exist at {file_path}'))
+    
+    ModifyPropertiesFile(file_path,
+      properties = params.config['configurations']['ranger-hdfs-plugin-properties']
+    )
 
     if params.enable_ranger_hdfs:
-      cmd = format('cd /usr/hdp/{hdp_version}/ranger-hdfs-plugin/ && sh enable-hdfs-plugin.sh')
-      ranger_adm_obj = Rangeradmin(url=ranger_hdfs_dict['POLICY_MGR_URL'])
-      response_code, response_recieved = ranger_adm_obj.check_ranger_login_urllib2(
-        ranger_hdfs_dict['POLICY_MGR_URL'] + '/login.jsp', 'test:test')
-
-      if response_code is not None and response_code == 200:
-        ambari_ranger_admin, ambari_ranger_password = ranger_adm_obj.create_ambari_admin_user(params.ambari_ranger_admin, params.ambari_ranger_password, params.admin_uname_password)
-        ambari_username_password_for_ranger = ambari_ranger_admin + ':' + ambari_ranger_password
-        if ambari_ranger_admin != '' and ambari_ranger_password != '':
-          repo = ranger_adm_obj.get_repository_by_name_urllib2(ranger_hdfs_dict['REPOSITORY_NAME'], 'hdfs', 'true', ambari_username_password_for_ranger)
-          if repo and repo['name'] == ranger_hdfs_dict['REPOSITORY_NAME']:
-            Logger.info('HDFS Repository exist')
-          else:
-            response = ranger_adm_obj.create_repository_urllib2(hdfs_repo_data, ambari_username_password_for_ranger, params.policy_user)
-            if response is not None:
-              Logger.info('HDFS Repository created in Ranger Admin')
-            else:
-              Logger.info('HDFS Repository creation failed in Ranger Admin')
-        else:
-          Logger.info('Ambari admin username and password are blank ')
-      else:
-        Logger.info('Ranger service is not started on given host')
+      cmd = ('enable-hdfs-plugin.sh',)
+      
+      ranger_adm_obj = Rangeradmin(url=params.policymgr_mgr_url)
+      ranger_adm_obj.create_ranger_repository('hdfs', params.repo_name, params.hdfs_ranger_plugin_repo,
+                                              params.ambari_ranger_admin, params.ambari_ranger_password, 
+                                              params.admin_uname, params.admin_password, 
+                                              params.policy_user)
     else:
-      cmd = format('cd /usr/hdp/{hdp_version}/ranger-hdfs-plugin/ && sh disable-hdfs-plugin.sh')
-
-    Execute(cmd, environment={'JAVA_HOME': params.java_home}, logoutput=True)
+      cmd = ('disable-hdfs-plugin.sh',)
+      
+    cmd_env = {'JAVA_HOME': params.java_home, 'PWD': format('/usr/hdp/{hdp_version}/ranger-hdfs-plugin'), 'PATH': format('/usr/hdp/{hdp_version}/ranger-hdfs-plugin')}
+    
+    Execute(cmd, 
+          environment=cmd_env, 
+          logoutput=True,
+          sudo=True,
+    )                    
   else:
-    Logger.info('Ranger admin not installed')
-
-
-def write_properties_to_file(file_path, value):
-  for key in value:
-    modify_config(file_path, key, value[key])
-
-
-def modify_config(filepath, variable, setting):
-  var_found = False
-  already_set = False
-  V = str(variable)
-  S = str(setting)
-  # use quotes if setting has spaces #
-  if ' ' in S:
-    S = '%s' % S
-
-  for line in fileinput.input(filepath, inplace=1):
-    # process lines that look like config settings #
-    if not line.lstrip(' ').startswith('#') and '=' in line:
-      _infile_var = str(line.split('=')[0].rstrip(' '))
-      _infile_set = str(line.split('=')[1].lstrip(' ').rstrip())
-      # only change the first matching occurrence #
-      if var_found == False and _infile_var.rstrip(' ') == V:
-        var_found = True
-        # don't change it if it is already set #
-        if _infile_set.lstrip(' ') == S:
-          already_set = True
-        else:
-          line = "%s=%s\n" % (V, S)
-
-    sys.stdout.write(line)
-
-  # Append the variable if it wasn't found #
-  if not var_found:
-    with open(filepath, "a") as f:
-      f.write("%s=%s\n" % (V, S))
-  elif already_set == True:
-    pass
-  else:
-    pass
-
-  return
-
-
-def ranger_hdfs_properties():
-  import params
-
-  ranger_hdfs_properties = dict()
-
-  ranger_hdfs_properties['POLICY_MGR_URL'] = params.policymgr_mgr_url
-  ranger_hdfs_properties['SQL_CONNECTOR_JAR'] = params.sql_connector_jar
-  ranger_hdfs_properties['XAAUDIT.DB.FLAVOUR'] = params.xa_audit_db_flavor
-  ranger_hdfs_properties['XAAUDIT.DB.DATABASE_NAME'] = params.xa_audit_db_name
-  ranger_hdfs_properties['XAAUDIT.DB.USER_NAME'] = params.xa_audit_db_user
-  ranger_hdfs_properties['XAAUDIT.DB.PASSWORD'] = params.xa_audit_db_password
-  ranger_hdfs_properties['XAAUDIT.DB.HOSTNAME'] = params.xa_db_host
-  ranger_hdfs_properties['REPOSITORY_NAME'] = params.repo_name
-  ranger_hdfs_properties['XAAUDIT.DB.IS_ENABLED'] = params.db_enabled
-
-  ranger_hdfs_properties['XAAUDIT.HDFS.IS_ENABLED'] = params.hdfs_enabled
-  ranger_hdfs_properties['XAAUDIT.HDFS.DESTINATION_DIRECTORY'] = params.hdfs_dest_dir
-  ranger_hdfs_properties['XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY'] = params.hdfs_buffer_dir
-  ranger_hdfs_properties['XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY'] = params.hdfs_archive_dir
-  ranger_hdfs_properties['XAAUDIT.HDFS.DESTINTATION_FILE'] = params.hdfs_dest_file
-  ranger_hdfs_properties['XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS'] = params.hdfs_dest_flush_int_sec
-  ranger_hdfs_properties['XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS'] = params.hdfs_dest_rollover_int_sec
-  ranger_hdfs_properties['XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS'] = params.hdfs_dest_open_retry_int_sec
-  ranger_hdfs_properties['XAAUDIT.HDFS.LOCAL_BUFFER_FILE'] = params.hdfs_buffer_file
-  ranger_hdfs_properties['XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS'] = params.hdfs_buffer_flush_int_sec
-  ranger_hdfs_properties['XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS'] = params.hdfs_buffer_rollover_int_sec
-  ranger_hdfs_properties['XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT'] = params.hdfs_archive_max_file_count
-
-  ranger_hdfs_properties['SSL_KEYSTORE_FILE_PATH'] = params.ssl_keystore_file
-  ranger_hdfs_properties['SSL_KEYSTORE_PASSWORD'] = params.ssl_keystore_password
-  ranger_hdfs_properties['SSL_TRUSTSTORE_FILE_PATH'] = params.ssl_truststore_file
-  ranger_hdfs_properties['SSL_TRUSTSTORE_PASSWORD'] = params.ssl_truststore_password
-
-  if params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.3') >= 0:
-    ranger_hdfs_properties['XAAUDIT.SOLR.IS_ENABLED'] = str(params.solr_enabled).lower()
-    ranger_hdfs_properties['XAAUDIT.SOLR.MAX_QUEUE_SIZE'] = params.solr_max_queue_size
-    ranger_hdfs_properties['XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS'] = params.solr_max_flush_interval
-    ranger_hdfs_properties['XAAUDIT.SOLR.SOLR_URL'] = params.solr_url
-
-  return ranger_hdfs_properties
-
-
-def hdfs_repo_properties():
-  import params
-
-  config_dict = dict()
-  config_dict['username'] = params.repo_config_username
-  config_dict['password'] = params.repo_config_password
-  config_dict['hadoop.security.authentication'] = params.hadoop_security_authentication
-  config_dict['hadoop.security.authorization'] = params.hadoop_security_authorization
-  config_dict['fs.default.name'] = params.fs_default_name
-  config_dict['hadoop.security.auth_to_local'] = params.hadoop_security_auth_to_local
-  config_dict['hadoop.rpc.protection'] = params.hadoop_rpc_protection
-  config_dict['commonNameForCertificate'] = params.common_name_for_certificate
-
-  if params.security_enabled:
-    config_dict['dfs.datanode.kerberos.principal'] = params.dn_principal_name
-    config_dict['dfs.namenode.kerberos.principal'] = params.nn_principal_name
-    config_dict['dfs.secondary.namenode.kerberos.principal'] = params.sn_principal_name
-  else:
-    config_dict['dfs.datanode.kerberos.principal'] = ''
-    config_dict['dfs.namenode.kerberos.principal'] = ''
-    config_dict['dfs.secondary.namenode.kerberos.principal'] = ''
-
-  repo = dict()
-  repo['isActive'] = "true"
-  repo['config'] = json.dumps(config_dict)
-  repo['description'] = "hdfs repo"
-  repo['name'] = params.repo_name
-  repo['repositoryType'] = "hdfs"
-  repo['assetType'] = '1'
-
-  data = json.dumps(repo)
-
-  return data
+    Logger.info('Ranger admin not installed')
\ No newline at end of file
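
Note: the non-root fix above rests on two patterns: commands are passed to Execute as
tuples (no shell), so the agent can wrap them with sudo safely, and idempotence moves
from Python-side os.path checks into not_if guards evaluated on the host. A minimal
sketch reusing the same resources as the diff (paths are hypothetical):

    from resource_management import Execute, format

    downloaded_custom_connector = '/var/lib/ambari-agent/tmp/mysql-connector-java.jar'
    driver_curl_target = '/usr/share/java/mysql-connector-java.jar'

    # Tuple form avoids shell interpolation; sudo=True lets a non-root agent
    # escalate just this command; not_if skips the copy once the jar exists.
    Execute(('cp', '--remove-destination', downloaded_custom_connector, driver_curl_target),
            path=['/bin', '/usr/bin/'],
            not_if=format('test -f {driver_curl_target}'),
            sudo=True)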

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e44537b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
index 2bf5867..de82356 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
@@ -1,162 +1,209 @@
 <?xml version="1.0" encoding="UTF-8"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
 <configuration supports_final="true">
 
-  <property>
-    <name>policy_user</name>
-    <value>ambari-qa</value>
-    <description>This user must be system user and also present at Ranger admin portal</description>
-  </property> 
-
-  <property>
-    <name>hadoop.rpc.protection</name>
-    <value>-</value>
-    <description>Used for repository creation on ranger admin</description>
-  </property>
-
-  <property>
-    <name>common.name.for.certificate</name>
-    <value>-</value>
-    <description>Used for repository creation on ranger admin</description>
-  </property>
-
-  <property>
-    <name>ranger-hdfs-plugin-enabled</name>
-    <value>No</value>
-    <description>Enable ranger hdfs plugin ?</description>
-  </property>
-
-  <property>
-    <name>REPOSITORY_CONFIG_USERNAME</name>
-    <value>hadoop</value>
-    <description>Used for repository creation on ranger admin</description>
-  </property>
-
-  <property>
-    <name>REPOSITORY_CONFIG_PASSWORD</name>
-    <value>hadoop</value>
-    <property-type>PASSWORD</property-type>
-    <description>Used for repository creation on ranger admin</description>
-  </property> 
-
-  <property>
-    <name>XAAUDIT.DB.IS_ENABLED</name>
-    <value>true</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>XAAUDIT.HDFS.IS_ENABLED</name>
-    <value>false</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>XAAUDIT.HDFS.DESTINATION_DIRECTORY</name>
-    <value>hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY</name>
-    <value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY</name>
-    <value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>XAAUDIT.HDFS.DESTINTATION_FILE</name>
-    <value>%hostname%-audit.log</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS</name>
-    <value>900</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS</name>
-    <value>86400</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS</name>
-    <value>60</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>XAAUDIT.HDFS.LOCAL_BUFFER_FILE</name>
-    <value>%time:yyyyMMdd-HHmm.ss%.log</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS</name>
-    <value>60</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS</name>
-    <value>600</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT</name>
-    <value>10</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>SSL_KEYSTORE_FILE_PATH</name>
-    <value>/etc/hadoop/conf/ranger-plugin-keystore.jks</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>SSL_KEYSTORE_PASSWORD</name>
-    <value>myKeyFilePassword</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>SSL_TRUSTSTORE_FILE_PATH</name>
-    <value>/etc/hadoop/conf/ranger-plugin-truststore.jks</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>SSL_TRUSTSTORE_PASSWORD</name>
-    <value>changeit</value>
-    <description></description>
-  </property>
+	<property>
+		<name>policy_user</name>
+		<value>ambari-qa</value>
+		<description>This user must be a system user and also present in the
+			Ranger Admin portal</description>
+	</property>
+
+	<property>
+		<name>hadoop.rpc.protection</name>
+		<value>-</value>
+		<description>Used for repository creation on ranger admin
+		</description>
+	</property>
+
+	<property>
+		<name>common.name.for.certificate</name>
+		<value>-</value>
+		<description>Used for repository creation on ranger admin
+		</description>
+	</property>
+
+	<property>
+		<name>ranger-hdfs-plugin-enabled</name>
+		<value>No</value>
+		<description>Enable the Ranger HDFS plugin?</description>
+	</property>
+
+	<property>
+		<name>REPOSITORY_CONFIG_USERNAME</name>
+		<value>hadoop</value>
+		<description>Used for repository creation on ranger admin
+		</description>
+	</property>
+
+	<property>
+		<name>REPOSITORY_CONFIG_PASSWORD</name>
+		<value>hadoop</value>
+		<property-type>PASSWORD</property-type>
+		<description>Used for repository creation on ranger admin
+		</description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.DB.IS_ENABLED</name>
+		<value>true</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.IS_ENABLED</name>
+		<value>false</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINATION_DIRECTORY</name>
+		<value>hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%
+		</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY</name>
+		<value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY</name>
+		<value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_FILE</name>
+		<value>%hostname%-audit.log</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS</name>
+		<value>900</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS</name>
+		<value>86400</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS</name>
+		<value>60</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_FILE</name>
+		<value>%time:yyyyMMdd-HHmm.ss%.log</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS</name>
+		<value>60</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS</name>
+		<value>600</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT</name>
+		<value>10</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_KEYSTORE_FILE_PATH</name>
+		<value>/etc/hadoop/conf/ranger-plugin-keystore.jks</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_KEYSTORE_PASSWORD</name>
+		<value>myKeyFilePassword</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_TRUSTSTORE_FILE_PATH</name>
+		<value>/etc/hadoop/conf/ranger-plugin-truststore.jks</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_TRUSTSTORE_PASSWORD</name>
+		<value>changeit</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>POLICY_MGR_URL</name>
+		<value>{{policymgr_mgr_url}}</value>
+		<description>Policy Manager url</description>
+	</property>
+
+	<property>
+		<name>SQL_CONNECTOR_JAR</name>
+		<value>{{sql_connector_jar}}</value>
+		<description>Location of DB client library (please check the location
+			of the jar file)</description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.DB.FLAVOUR</name>
+		<value>{{xa_audit_db_flavor}}</value>
+		<description>The database type to be used (mysql/oracle/postgres/sqlserver)</description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.DB.DATABASE_NAME</name>
+		<value>{{xa_audit_db_name}}</value>
+		<description>Audit database name</description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.DB.USER_NAME</name>
+		<value>{{xa_audit_db_user}}</value>
+		<description>Audit database user</description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.DB.PASSWORD</name>
+		<value>{{xa_audit_db_password}}</value>
+		<description>Audit database password</description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.DB.HOSTNAME</name>
+		<value>{{xa_db_host}}</value>
+		<description>Audit database hostname</description>
+	</property>
+
+	<property>
+		<name>REPOSITORY_NAME</name>
+		<value>{{repo_name}}</value>
+		<description>Ranger repository name</description>
+	</property>
 
 </configuration>
\ No newline at end of file
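
Note: the new template-valued properties ({{policymgr_mgr_url}} and the rest) follow
Ambari's convention of resolving {{name}} placeholders against the script's params at
runtime. Conceptually the substitution amounts to the following (illustrative sketch,
not Ambari's actual resolver):

    import re

    params = {'policymgr_mgr_url': 'http://ranger.example.com:6080'}  # hypothetical

    def resolve(value, params):
        # Replace each {{name}} with the matching param; leave unknown names as-is.
        return re.sub(r'\{\{(\w+)\}\}',
                      lambda m: str(params.get(m.group(1), m.group(0))),
                      value)

    print(resolve('{{policymgr_mgr_url}}', params))  # http://ranger.example.com:6080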

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e44537b/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json b/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
index 1171e7f..f6c9bb4 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
@@ -607,6 +607,35 @@
         },
         "flume-log4j": {
           "content": "log4jproperties\nline2"
+        },
+        "ranger-hdfs-plugin-properties": {
+            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 
+            "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", 
+            "common.name.for.certificate": "-", 
+            "XAAUDIT.HDFS.IS_ENABLED": "false", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
+            "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
+            "XAAUDIT.DB.IS_ENABLED": "true", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", 
+            "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits", 
+            "XAAUDIT.SOLR.IS_ENABLED": "false", 
+            "hadoop.rpc.protection": "-", 
+            "ranger-hdfs-plugin-enabled": "Yes", 
+            "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
+            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
+            "policy_user": "ambari-qa", 
+            "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", 
+            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", 
+            "SSL_TRUSTSTORE_PASSWORD": "changeit", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", 
+            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
+            "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", 
+            "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", 
+            "REPOSITORY_CONFIG_PASSWORD": "hadoop", 
+            "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
         }
     },
     "configuration_attributes": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e44537b/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json b/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json
index 956baad..aeecb79 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json
@@ -328,11 +328,34 @@
             "ipc.client.connection.maxidletime": "30000", 
             "ipc.client.connect.max.retries": "50"
         }, 
-        "ranger-hdfs-plugin-properties" : {
-            "ranger-hdfs-plugin-enabled":"yes"
-        },
-        "ranger-hbase-plugin-properties" : {
-            "ranger-hbase-plugin-enabled":"yes"
+        "ranger-hdfs-plugin-properties": {
+            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 
+            "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", 
+            "common.name.for.certificate": "-", 
+            "XAAUDIT.HDFS.IS_ENABLED": "false", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
+            "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
+            "XAAUDIT.DB.IS_ENABLED": "true", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", 
+            "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits", 
+            "XAAUDIT.SOLR.IS_ENABLED": "false", 
+            "hadoop.rpc.protection": "-", 
+            "ranger-hdfs-plugin-enabled": "Yes", 
+            "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
+            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
+            "policy_user": "ambari-qa", 
+            "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", 
+            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", 
+            "SSL_TRUSTSTORE_PASSWORD": "changeit", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", 
+            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
+            "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", 
+            "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", 
+            "REPOSITORY_CONFIG_PASSWORD": "hadoop", 
+            "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
         },
         "yarn-env": {
             "yarn_pid_dir_prefix": "/var/run/hadoop-yarn", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e44537b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
index 90cae08..5e4a2d1 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
@@ -645,6 +645,35 @@
             "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", 
             "REPOSITORY_CONFIG_PASSWORD": "hbase", 
             "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
+        },
+        "ranger-hdfs-plugin-properties": {
+            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 
+            "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", 
+            "common.name.for.certificate": "-", 
+            "XAAUDIT.HDFS.IS_ENABLED": "false", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
+            "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
+            "XAAUDIT.DB.IS_ENABLED": "true", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", 
+            "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits", 
+            "XAAUDIT.SOLR.IS_ENABLED": "false", 
+            "hadoop.rpc.protection": "-", 
+            "ranger-hdfs-plugin-enabled": "Yes", 
+            "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
+            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
+            "policy_user": "ambari-qa", 
+            "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", 
+            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", 
+            "SSL_TRUSTSTORE_PASSWORD": "changeit", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", 
+            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
+            "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", 
+            "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", 
+            "REPOSITORY_CONFIG_PASSWORD": "hadoop", 
+            "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
         }
     },
     "configuration_attributes": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e44537b/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
index 41783c2..b1be0ff 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
@@ -610,6 +610,35 @@
         },
         "flume-log4j": {
           "content": "log4jproperties\nline2"
+        },
+        "ranger-hdfs-plugin-properties": {
+            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 
+            "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", 
+            "common.name.for.certificate": "-", 
+            "XAAUDIT.HDFS.IS_ENABLED": "false", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
+            "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
+            "XAAUDIT.DB.IS_ENABLED": "true", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", 
+            "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits", 
+            "XAAUDIT.SOLR.IS_ENABLED": "false", 
+            "hadoop.rpc.protection": "-", 
+            "ranger-hdfs-plugin-enabled": "Yes", 
+            "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
+            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
+            "policy_user": "ambari-qa", 
+            "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", 
+            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", 
+            "SSL_TRUSTSTORE_PASSWORD": "changeit", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", 
+            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
+            "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", 
+            "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", 
+            "REPOSITORY_CONFIG_PASSWORD": "hadoop", 
+            "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
         }
     },
     "configuration_attributes": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e44537b/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json
index e59e544..b2fd6e8 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json
@@ -595,6 +595,35 @@
         },
         "flume-log4j": {
           "content": "log4jproperties\nline2"
+        },
+        "ranger-hdfs-plugin-properties": {
+            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 
+            "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", 
+            "common.name.for.certificate": "-", 
+            "XAAUDIT.HDFS.IS_ENABLED": "false", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
+            "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
+            "XAAUDIT.DB.IS_ENABLED": "true", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", 
+            "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits", 
+            "XAAUDIT.SOLR.IS_ENABLED": "false", 
+            "hadoop.rpc.protection": "-", 
+            "ranger-hdfs-plugin-enabled": "Yes", 
+            "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
+            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
+            "policy_user": "ambari-qa", 
+            "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", 
+            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", 
+            "SSL_TRUSTSTORE_PASSWORD": "changeit", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", 
+            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
+            "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", 
+            "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", 
+            "REPOSITORY_CONFIG_PASSWORD": "hadoop", 
+            "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
         }
     },
     "configuration_attributes": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e44537b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_active_node.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_active_node.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_active_node.json
index a8b6a42..86747c5 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_active_node.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_active_node.json
@@ -486,6 +486,35 @@
         "sqoop-env": {
             "content": "\n# Set Hadoop-specific environment variables here.\n\n#Set path to where bin/hadoop is available\n#Set path to where bin/hadoop is available\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n#set the path to where bin/hbase is available\nexport HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}\n\n#Set the path to where bin/hive is available\nexport HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}\n\n#Set the path for where zookeper config dir is\nexport ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}\n\n# add libthrift in hive to sqoop class path first so hive imports work\nexport SQOOP_USER_CLASSPATH=\"`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}\"",
             "sqoop_user": "sqoop"
+        },
+        "ranger-hdfs-plugin-properties": {
+            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 
+            "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", 
+            "common.name.for.certificate": "-", 
+            "XAAUDIT.HDFS.IS_ENABLED": "false", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
+            "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
+            "XAAUDIT.DB.IS_ENABLED": "true", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", 
+            "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits", 
+            "XAAUDIT.SOLR.IS_ENABLED": "false", 
+            "hadoop.rpc.protection": "-", 
+            "ranger-hdfs-plugin-enabled": "Yes", 
+            "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
+            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
+            "policy_user": "ambari-qa", 
+            "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", 
+            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", 
+            "SSL_TRUSTSTORE_PASSWORD": "changeit", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", 
+            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
+            "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", 
+            "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", 
+            "REPOSITORY_CONFIG_PASSWORD": "hadoop", 
+            "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
         }
     },
     "configuration_attributes": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e44537b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json
index 20595e0..845851b 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json
@@ -486,6 +486,35 @@
         "sqoop-env": {
             "content": "\n# Set Hadoop-specific environment variables here.\n\n#Set path to where bin/hadoop is available\n#Set path to where bin/hadoop is available\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n#set the path to where bin/hbase is available\nexport HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}\n\n#Set the path to where bin/hive is available\nexport HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}\n\n#Set the path for where zookeper config dir is\nexport ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}\n\n# add libthrift in hive to sqoop class path first so hive imports work\nexport SQOOP_USER_CLASSPATH=\"`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}\"",
             "sqoop_user": "sqoop"
+        },
+        "ranger-hdfs-plugin-properties": {
+            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 
+            "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", 
+            "common.name.for.certificate": "-", 
+            "XAAUDIT.HDFS.IS_ENABLED": "false", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
+            "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
+            "XAAUDIT.DB.IS_ENABLED": "true", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", 
+            "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits", 
+            "XAAUDIT.SOLR.IS_ENABLED": "false", 
+            "hadoop.rpc.protection": "-", 
+            "ranger-hdfs-plugin-enabled": "Yes", 
+            "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
+            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
+            "policy_user": "ambari-qa", 
+            "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", 
+            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", 
+            "SSL_TRUSTSTORE_PASSWORD": "changeit", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", 
+            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
+            "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", 
+            "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", 
+            "REPOSITORY_CONFIG_PASSWORD": "hadoop", 
+            "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
         }
     },
     "configuration_attributes": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e44537b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
index 7cd17bc..abe0ddb 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
@@ -487,6 +487,35 @@
         "sqoop-env": {
             "content": "\n# Set Hadoop-specific environment variables here.\n\n#Set path to where bin/hadoop is available\n#Set path to where bin/hadoop is available\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n#set the path to where bin/hbase is available\nexport HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}\n\n#Set the path to where bin/hive is available\nexport HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}\n\n#Set the path for where zookeper config dir is\nexport ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}\n\n# add libthrift in hive to sqoop class path first so hive imports work\nexport SQOOP_USER_CLASSPATH=\"`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}\"",
             "sqoop_user": "sqoop"
+        },
+        "ranger-hdfs-plugin-properties": {
+            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 
+            "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", 
+            "common.name.for.certificate": "-", 
+            "XAAUDIT.HDFS.IS_ENABLED": "false", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
+            "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
+            "XAAUDIT.DB.IS_ENABLED": "true", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", 
+            "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits", 
+            "XAAUDIT.SOLR.IS_ENABLED": "false", 
+            "hadoop.rpc.protection": "-", 
+            "ranger-hdfs-plugin-enabled": "Yes", 
+            "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
+            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
+            "policy_user": "ambari-qa", 
+            "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", 
+            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", 
+            "SSL_TRUSTSTORE_PASSWORD": "changeit", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", 
+            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
+            "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", 
+            "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", 
+            "REPOSITORY_CONFIG_PASSWORD": "hadoop", 
+            "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
         }
     },
     "configuration_attributes": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e44537b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
index 68285e2..f2d98c8 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
@@ -506,6 +506,35 @@
         "sqoop-env": {
             "content": "\n# Set Hadoop-specific environment variables here.\n\n#Set path to where bin/hadoop is available\n#Set path to where bin/hadoop is available\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n#set the path to where bin/hbase is available\nexport HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}\n\n#Set the path to where bin/hive is available\nexport HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}\n\n#Set the path for where zookeper config dir is\nexport ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}\n\n# add libthrift in hive to sqoop class path first so hive imports work\nexport SQOOP_USER_CLASSPATH=\"`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}\"",
             "sqoop_user": "sqoop"
+        },
+        "ranger-hdfs-plugin-properties": {
+            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 
+            "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", 
+            "common.name.for.certificate": "-", 
+            "XAAUDIT.HDFS.IS_ENABLED": "false", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
+            "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
+            "XAAUDIT.DB.IS_ENABLED": "true", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", 
+            "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits", 
+            "XAAUDIT.SOLR.IS_ENABLED": "false", 
+            "hadoop.rpc.protection": "-", 
+            "ranger-hdfs-plugin-enabled": "Yes", 
+            "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
+            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
+            "policy_user": "ambari-qa", 
+            "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", 
+            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", 
+            "SSL_TRUSTSTORE_PASSWORD": "changeit", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", 
+            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
+            "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", 
+            "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", 
+            "REPOSITORY_CONFIG_PASSWORD": "hadoop", 
+            "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
         }
     },
     "configuration_attributes": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e44537b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_ru_lzo.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_ru_lzo.json b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_ru_lzo.json
index 61cca8e..5999fb7 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_ru_lzo.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_ru_lzo.json
@@ -177,6 +177,35 @@
             "smokeuser": "ambari-qa", 
             "sqoop_tar_destination_folder": "hdfs:///hdp/apps/{{ hdp_stack_version }}/sqoop/", 
             "tez_tar_source": "/usr/hdp/current/tez-client/lib/tez.tar.gz"
+        },
+        "ranger-hdfs-plugin-properties": {
+            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 
+            "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", 
+            "common.name.for.certificate": "-", 
+            "XAAUDIT.HDFS.IS_ENABLED": "false", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
+            "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
+            "XAAUDIT.DB.IS_ENABLED": "true", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", 
+            "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits", 
+            "XAAUDIT.SOLR.IS_ENABLED": "false", 
+            "hadoop.rpc.protection": "-", 
+            "ranger-hdfs-plugin-enabled": "Yes", 
+            "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
+            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
+            "policy_user": "ambari-qa", 
+            "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", 
+            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", 
+            "SSL_TRUSTSTORE_PASSWORD": "changeit", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", 
+            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
+            "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", 
+            "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", 
+            "REPOSITORY_CONFIG_PASSWORD": "hadoop", 
+            "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
         }
     }, 
     "commandId": "23-10", 

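Note that the audit paths in every block carry literal __REPLACE__NAME_NODE_HOST and __REPLACE__LOG_DIR tokens; the fixtures leave them unresolved, to be substituted with real values when the plugin properties are materialized. A hypothetical sketch of that substitution (the helper name and host value are illustrative, not taken from this commit):

    # Hypothetical illustration of resolving the __REPLACE__* placeholders
    # seen in XAAUDIT.HDFS.DESTINATION_DIRECTORY and the LOCAL_* paths.
    def resolve_placeholders(template, namenode_host, log_dir):
        return (template
                .replace("__REPLACE__NAME_NODE_HOST", namenode_host)
                .replace("__REPLACE__LOG_DIR", log_dir))

    template = ("hdfs://__REPLACE__NAME_NODE_HOST:8020/"
                "ranger/audit/%app-type%/%time:yyyyMMdd%")
    print(resolve_placeholders(template, "c6401.ambari.apache.org", "/var/log"))
    # -> hdfs://c6401.ambari.apache.org:8020/ranger/audit/%app-type%/%time:yyyyMMdd%
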
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e44537b/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_default.json
index a6d1408..5092c91 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_default.json
@@ -303,6 +303,35 @@
             "mapred_user": "mapred", 
             "jobhistory_heapsize": "900", 
             "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce"
+        },
+        "ranger-hdfs-plugin-properties": {
+            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 
+            "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", 
+            "common.name.for.certificate": "-", 
+            "XAAUDIT.HDFS.IS_ENABLED": "false", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
+            "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
+            "XAAUDIT.DB.IS_ENABLED": "true", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", 
+            "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits", 
+            "XAAUDIT.SOLR.IS_ENABLED": "false", 
+            "hadoop.rpc.protection": "-", 
+            "ranger-hdfs-plugin-enabled": "Yes", 
+            "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
+            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
+            "policy_user": "ambari-qa", 
+            "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", 
+            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", 
+            "SSL_TRUSTSTORE_PASSWORD": "changeit", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", 
+            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
+            "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", 
+            "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", 
+            "REPOSITORY_CONFIG_PASSWORD": "hadoop", 
+            "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
         }
     }, 
     "configurationTags": {

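Since the fixtures are plain JSON files, a quick spot-check that a given config carries the new section does not need the full test harness. A hypothetical check, assuming the section sits under the top-level "configurations" key as the surrounding hunk context suggests, and that it is run from the repository root:

    import json

    # Repo-relative path taken from the diff header above.
    path = ("ambari-server/src/test/python/stacks/2.0.6/configs/"
            "rebalancehdfs_default.json")
    with open(path) as f:
        config = json.load(f)

    section = config["configurations"]["ranger-hdfs-plugin-properties"]
    assert section["XAAUDIT.DB.IS_ENABLED"] == "true"
    assert section["XAAUDIT.SOLR.IS_ENABLED"] == "false"
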
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e44537b/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_secured.json b/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_secured.json
index 07d2e30..daa46af 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_secured.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_secured.json
@@ -305,6 +305,35 @@
             "mapred_user": "mapred", 
             "jobhistory_heapsize": "900", 
             "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce"
+        },
+        "ranger-hdfs-plugin-properties": {
+            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 
+            "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", 
+            "common.name.for.certificate": "-", 
+            "XAAUDIT.HDFS.IS_ENABLED": "false", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
+            "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
+            "XAAUDIT.DB.IS_ENABLED": "true", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", 
+            "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits", 
+            "XAAUDIT.SOLR.IS_ENABLED": "false", 
+            "hadoop.rpc.protection": "-", 
+            "ranger-hdfs-plugin-enabled": "Yes", 
+            "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
+            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
+            "policy_user": "ambari-qa", 
+            "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", 
+            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", 
+            "SSL_TRUSTSTORE_PASSWORD": "changeit", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", 
+            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
+            "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", 
+            "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", 
+            "REPOSITORY_CONFIG_PASSWORD": "hadoop", 
+            "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
         }
     }, 
     "configurationTags": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e44537b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
index 6cce47c..7d66e72 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
@@ -661,6 +661,35 @@
             "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", 
             "REPOSITORY_CONFIG_PASSWORD": "hbase", 
             "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
+        },
+        "ranger-hdfs-plugin-properties": {
+            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 
+            "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", 
+            "common.name.for.certificate": "-", 
+            "XAAUDIT.HDFS.IS_ENABLED": "false", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
+            "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
+            "XAAUDIT.DB.IS_ENABLED": "true", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", 
+            "XAAUDIT.SOLR.SOLR_URL": "http://localhost:6083/solr/ranger_audits", 
+            "XAAUDIT.SOLR.IS_ENABLED": "false", 
+            "hadoop.rpc.protection": "-", 
+            "ranger-hdfs-plugin-enabled": "Yes", 
+            "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
+            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
+            "policy_user": "ambari-qa", 
+            "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", 
+            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", 
+            "SSL_TRUSTSTORE_PASSWORD": "changeit", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", 
+            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
+            "XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS": "1000", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", 
+            "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", 
+            "REPOSITORY_CONFIG_PASSWORD": "hadoop", 
+            "XAAUDIT.SOLR.MAX_QUEUE_SIZE": "1"
         }
     },
     "configuration_attributes": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e44537b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
index e8c58cf..0bbde77 100644
--- a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
+++ b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
@@ -133,28 +133,19 @@ class RMFTestCase(TestCase):
     if 'status_params' in sys.modules:
       del(sys.modules["status_params"])
 
-    # run
-    if try_install:
-      with Environment(basedir, test_mode=True) as RMFTestCase.env:
-        with patch('resource_management.core.shell.checked_call', side_effect=checked_call_mocks) as mocks_dict['checked_call']:
-          with patch('resource_management.core.shell.call', side_effect=call_mocks) as mocks_dict['call']:
-            with patch.object(Script, 'get_config', return_value=self.config_dict) as mocks_dict['get_config']: # mocking configurations
-              with patch.object(Script, 'get_tmp_dir', return_value="/tmp") as mocks_dict['get_tmp_dir']:
-                with patch.object(Script, 'install_packages') as mocks_dict['install_packages']:
-                  with patch('resource_management.libraries.functions.get_kinit_path', return_value=kinit_path_local) as mocks_dict['get_kinit_path']:
-                    with patch.object(platform, 'linux_distribution', return_value=os_type) as mocks_dict['linux_distribution']:
-                      with patch.object(os, "environ", new=os_env) as mocks_dict['environ']:
-                        method(RMFTestCase.env)
-    else:
-      with Environment(basedir, test_mode=True) as RMFTestCase.env:
-        with patch('resource_management.core.shell.checked_call', side_effect=checked_call_mocks) as mocks_dict['checked_call']:
-          with patch('resource_management.core.shell.call', side_effect=call_mocks) as mocks_dict['call']:
-            with patch.object(Script, 'get_config', return_value=self.config_dict) as mocks_dict['get_config']: # mocking configurations
-              with patch.object(Script, 'get_tmp_dir', return_value="/tmp") as mocks_dict['get_tmp_dir']:
-                  with patch('resource_management.libraries.functions.get_kinit_path', return_value=kinit_path_local) as mocks_dict['get_kinit_path']:
-                    with patch.object(platform, 'linux_distribution', return_value=os_type) as mocks_dict['linux_distribution']:
-                      with patch.object(os, "environ", new=os_env) as mocks_dict['environ']:
+    with Environment(basedir, test_mode=True) as RMFTestCase.env:
+      with patch('resource_management.core.shell.checked_call', side_effect=checked_call_mocks) as mocks_dict['checked_call']:
+        with patch('resource_management.core.shell.call', side_effect=call_mocks) as mocks_dict['call']:
+          with patch.object(Script, 'get_config', return_value=self.config_dict) as mocks_dict['get_config']: # mocking configurations
+            with patch.object(Script, 'get_tmp_dir', return_value="/tmp") as mocks_dict['get_tmp_dir']:
+              with patch('resource_management.libraries.functions.get_kinit_path', return_value=kinit_path_local) as mocks_dict['get_kinit_path']:
+                with patch.object(platform, 'linux_distribution', return_value=os_type) as mocks_dict['linux_distribution']:
+                  with patch.object(os, "environ", new=os_env) as mocks_dict['environ']:
+                    if not try_install:
+                      with patch.object(Script, 'install_packages') as install_mock_value:
                         method(RMFTestCase.env)
+                    else:
+                      method(RMFTestCase.env)
 
     sys.path.remove(scriptsdir)
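
The RMFTestCase change above collapses two nearly identical context-manager pyramids into one: the shared mocks are entered once, and only the Script.install_packages patch depends on try_install, so package installation is mocked out unless a test explicitly opts in to the install path. A minimal self-contained sketch of that pattern (DummyScript and run_test are illustrative names, and unittest.mock stands in for the mock package the harness imports):

    from unittest.mock import patch

    class DummyScript(object):
        def install_packages(self):
            raise RuntimeError("real package install must not run in tests")

    def run_test(method, try_install=False):
        # The shared patches (shell calls, get_config, ...) would be
        # entered here once for both branches; only this one is conditional.
        if not try_install:
            with patch.object(DummyScript, 'install_packages') as install_mock:
                method()
                return install_mock
        else:
            method()

    script = DummyScript()
    # The lambda defers attribute lookup until after the patch is applied,
    # so the call is intercepted by the mock instead of the real method.
    install_mock = run_test(lambda: script.install_packages())
    assert install_mock.called
    # With try_install=True the same call would reach the real method
    # and raise, mirroring how opting in exercises the install code path.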