You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by ma...@apache.org on 2015/01/12 05:37:15 UTC

[1/2] ambari git commit: Revert "Revert "AMBARI-8949. Support Ranger installation via Ambari. (gautam borad via jaimin)""

Repository: ambari
Updated Branches:
  refs/heads/trunk 01b3af1b7 -> 31cdf9fab


http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py
new file mode 100644
index 0000000..bb7d94c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from resource_management import *
+
# Global parameters shared by the Ranger admin and usersync scripts.
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

# Normalized HDP stack version string; empty when the version is unknown.
hdp_stack_version = format_hdp_stack_version(str(config['hostLevelParams']['stack_version']))
stack_is_hdp22_or_further = hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0

# Ranger install/control paths exist only on HDP 2.2+ stacks; on older
# stacks these names are intentionally left undefined.
if stack_is_hdp22_or_further:
    ranger_home = '/usr/hdp/current/ranger-admin'
    ranger_stop = '/usr/bin/ranger-admin-stop'
    ranger_start = '/usr/bin/ranger-admin-start'
    usersync_home = '/usr/hdp/current/ranger-usersync'
    usersync_start = '/usr/bin/ranger-usersync-start'
    usersync_stop = '/usr/bin/ranger-usersync-stop'

java_home = config['hostLevelParams']['java_home']
unix_user = default("/configurations/ranger-env/unix_user", "ranger")
unix_group = default("/configurations/ranger-env/unix_group", "ranger")
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py
new file mode 100644
index 0000000..c916162
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from setup_ranger import setup_ranger
+
class RangerAdmin(Script):
    """Lifecycle commands for the Ranger Admin component.

    Each command imports the params module lazily and pushes it into the
    execution environment before running shell commands.
    """

    def install(self, env):
        # Install OS packages, then run the Ranger setup script.
        self.install_packages(env)
        setup_ranger(env)

    def stop(self, env):
        import params
        env.set_params(params)
        Execute(format('{params.ranger_stop}'))

    def start(self, env):
        import params
        # Fix: set params before use, mirroring stop()/configure(); the
        # original start() relied on setup_ranger() to do this implicitly.
        env.set_params(params)
        # Re-run setup on every start so configuration changes take effect.
        setup_ranger(env)
        Execute(format('{params.ranger_start}'))

    def status(self, env):
        # No pid-file based status check is implemented yet.
        pass

    def configure(self, env):
        import params
        env.set_params(params)
+
+
+if __name__ == "__main__":
+  RangerAdmin().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_usersync.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_usersync.py b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_usersync.py
new file mode 100644
index 0000000..c4ed7ea
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_usersync.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import sys
+from resource_management import *
+from setup_ranger import setup_usersync
+
class RangerUsersync(Script):
    """Lifecycle commands for the Ranger Usersync component.

    Mirrors RangerAdmin: params are imported lazily and pushed into the
    execution environment before any shell command runs.
    """

    def install(self, env):
        # Install OS packages, then run the usersync setup script.
        self.install_packages(env)
        setup_usersync(env)

    def stop(self, env):
        import params
        # Fix: set params before use, for consistency with RangerAdmin.stop().
        env.set_params(params)
        Execute(format('{params.usersync_stop}'))

    def start(self, env):
        import params
        # Fix: set params before use (the original relied on setup_usersync()
        # doing this implicitly).
        env.set_params(params)
        # Re-run setup on every start so configuration changes take effect.
        setup_usersync(env)
        Execute(format('{params.usersync_start}'))

    def status(self, env):
        # No pid-file based status check is implemented yet.
        pass

    def configure(self, env):
        import params
        env.set_params(params)
+
+
+if __name__ == "__main__":
+  RangerUsersync().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger.py b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger.py
new file mode 100644
index 0000000..265018d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+import fileinput
+from resource_management import *
+from resource_management.core.logger import Logger
+
def setup_ranger(env):
    """Write admin-properties into install.properties and run Ranger setup.sh."""
    import params
    env.set_params(params)

    # check_db_connnection() calls sys.exit(1) on failure, so reaching the
    # guard's False branch should not happen; keep the early return anyway.
    if not check_db_connnection(env):
        return

    install_properties = params.ranger_home + '/install.properties'
    write_properties_to_file(install_properties,
                             params.config['configurations']['admin-properties'])

    Execute(format('cd {ranger_home} && sh setup.sh'),
            environment={'JAVA_HOME': params.java_home},
            logoutput=True)
+
def setup_usersync(env):
    """Write usersync properties into install.properties and run usersync setup.sh."""
    import params
    env.set_params(params)

    install_properties = params.usersync_home + '/install.properties'
    write_properties_to_file(install_properties, usersync_properties(params))

    Execute(format('cd {usersync_home} && sh setup.sh'),
            environment={'JAVA_HOME': params.java_home},
            logoutput=True)
+
def write_properties_to_file(file_path, value):
    """Apply every key/value pair from the *value* mapping to the properties file."""
    for prop_name, prop_value in value.items():
        modify_config(file_path, prop_name, prop_value)
+
def modify_config(filepath, variable, setting):
    """Set ``variable=setting`` in a properties file, editing it in place.

    Only the first non-comment ``variable=...`` line is rewritten (and only
    when its current value differs); if the variable is absent, the pair is
    appended to the end of the file. Returns None.
    """
    target_key = str(variable)
    target_val = str(setting)
    replaced = False

    # fileinput with inplace=1 redirects stdout into the file being edited,
    # so every line must be written back, changed or not.
    for line in fileinput.input(filepath, inplace=1):
        if not replaced and not line.lstrip(' ').startswith('#') and '=' in line:
            key = line.split('=')[0].rstrip(' ')
            if key == target_key:
                replaced = True
                current_val = line.split('=')[1].lstrip(' ').rstrip()
                if current_val != target_val:
                    line = "%s=%s\n" % (target_key, target_val)
        sys.stdout.write(line)

    if not replaced:
        with open(filepath, "a") as out:
            out.write("%s=%s\n" % (target_key, target_val))

    return
+
def usersync_properties(params):
    """Build the install.properties dict for the Ranger usersync setup script."""
    admin_conf = params.config['configurations']['admin-properties']
    sync_conf = params.config['configurations']['usersync-properties']

    # Policy manager URL comes from admin-properties; everything else is
    # copied verbatim from usersync-properties (KeyError if one is missing,
    # same as the original explicit lookups).
    props = {'POLICY_MGR_URL': admin_conf['policymgr_external_url']}
    for key in ('SYNC_SOURCE', 'MIN_UNIX_USER_ID_TO_SYNC', 'SYNC_INTERVAL',
                'SYNC_LDAP_URL', 'SYNC_LDAP_BIND_DN', 'SYNC_LDAP_BIND_PASSWORD',
                'CRED_KEYSTORE_FILENAME', 'SYNC_LDAP_USER_SEARCH_BASE',
                'SYNC_LDAP_USER_SEARCH_SCOPE', 'SYNC_LDAP_USER_OBJECT_CLASS',
                'SYNC_LDAP_USER_SEARCH_FILTER', 'SYNC_LDAP_USER_NAME_ATTRIBUTE',
                'SYNC_LDAP_USER_GROUP_NAME_ATTRIBUTE',
                'SYNC_LDAP_USERNAME_CASE_CONVERSION',
                'SYNC_LDAP_GROUPNAME_CASE_CONVERSION', 'logdir'):
        props[key] = sync_conf[key]

    return props
+
def check_db_connnection(env):
    """Verify the DB host accepts the configured root credentials.

    Returns True on success. On failure it logs and terminates the whole
    process via sys.exit(1), so callers never observe a False return.
    NOTE(review): the name keeps the original "connnection" spelling;
    renaming would break existing callers.
    """
    import params
    env.set_params(params)
    
    db_root_password = params.config['configurations']['admin-properties']["db_root_password"]
    db_root_user = params.config['configurations']['admin-properties']["db_root_user"]
    db_host = params.config['configurations']['admin-properties']['db_host']
    sql_command_invoker = params.config['configurations']['admin-properties']['SQL_COMMAND_INVOKER']

    Logger.info('Checking MYSQL root password')

    # SECURITY NOTE(review): the DB root password is passed on the command
    # line and is visible in the process list; consider a client options
    # file or MYSQL_PWD-style mechanism instead.
    cmd_str = "\""+sql_command_invoker+"\""+" -u "+db_root_user+" --password="+db_root_password+" -h "+db_host+" -s -e \"select version();\""
    # Only the exit status is used; the output slot is ignored here.
    status, output = get_status_output(cmd_str)

    if status == 0:
        Logger.info('Checking MYSQL root password DONE')
        return True 
    else:
        Logger.info('Ranger Admin Installation Failed, Ranger Host requires DB client installed and running to setup DB on given host')
        sys.exit(1)
+
def get_status_output(cmd):
    """Run *cmd* through the shell and return ``(exit_status, output)``.

    Fixes the original implementation, which returned the exit status in
    both tuple slots so the "output" value never contained any command
    output. stderr is folded into stdout so diagnostics are captured too.
    """
    import subprocess

    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, universal_newlines=True)
    output, _ = proc.communicate()
    return proc.returncode, output

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json b/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json
index e8bbe32..9b6465a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json
@@ -3,6 +3,19 @@
   "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
   "general_deps" : {
     "_comment" : "dependencies for all cases",
+    "NAMENODE-INSTALL" : ["RANGER_ADMIN-INSTALL"],
+    "DATANODE-INSTALL" : ["RANGER_ADMIN-INSTALL"],
+    "SECONDARY_NAMENODE-INSTALL" : ["RANGER_ADMIN-INSTALL"],
+    "HDFS_CLIENT-INSTALL" : ["RANGER_ADMIN-INSTALL"],
+    "HBASE_MASTER-INSTALL" : ["RANGER_ADMIN-INSTALL"],
+    "HBASE_REGIONSERVER-INSTALL" : ["RANGER_ADMIN-INSTALL"],
+    "HIVE_SERVER-INSTALL" : ["RANGER_ADMIN-INSTALL"],
+    "KNOX_GATEWAY-INSTALL" : ["RANGER_ADMIN-INSTALL"],
+    "RANGER_USERSYNC-INSTALL" : ["RANGER_ADMIN-INSTALL"],
+    "NAMENODE-START" : ["RANGER_ADMIN-START"],
+    "HBASE_MASTER-START" : ["RANGER_ADMIN-START"],
+    "HIVE_SERVER-START" : ["RANGER_ADMIN-START"],
+    "KNOX_GATEWAY-START" : ["RANGER_ADMIN-START"],    
     "FALCON_SERVER-START": ["NAMENODE-START", "DATANODE-START", "OOZIE_SERVER-START"],
     "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
     "FLUME_SERVICE_CHECK-SERVICE_CHECK": ["FLUME_HANDLER-START"],

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/ranger-hbase-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/ranger-hbase-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/ranger-hbase-plugin-properties.xml
new file mode 100644
index 0000000..d393d97
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/configuration/ranger-hbase-plugin-properties.xml
@@ -0,0 +1,150 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+
+        <property>
+                <name>ranger-hbase-plugin-enabled</name>
+                <value>Yes</value>
+                <description>Enable ranger hbase plugin ?</description>
+        </property>
+
+	<property>
+		<name>REPOSITORY_CONFIG_USERNAME</name>
+		<value>hbase</value>
+		<description></description>
+	</property>	
+
+	<property>
+		<name>REPOSITORY_CONFIG_PASSWORD</name>
+		<value>hbase</value>
+		<property-type>PASSWORD</property-type>
+		<description></description>
+	</property>	
+
+	<property>
+		<name>XAAUDIT.DB.IS_ENABLED</name>
+		<value>true</value>
+		<description></description>
+	</property>	
+
+	<property>
+		<name>XAAUDIT.HDFS.IS_ENABLED</name>
+		<value>false</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINATION_DIRECTORY</name>
+		<value>hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY</name>
+		<value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY</name>
+		<value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_FILE</name>
+		<value>%hostname%-audit.log</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS</name>
+		<value>900</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS</name>
+		<value>86400</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS</name>
+		<value>60</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_FILE</name>
+		<value>%time:yyyyMMdd-HHmm.ss%.log</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS</name>
+		<value>60</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS</name>
+		<value>600</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT</name>
+		<value>10</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_KEYSTORE_FILE_PATH</name>
+		<value>/etc/hadoop/conf/ranger-plugin-keystore.jks</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_KEYSTORE_PASSWORD</name>
+		<value>myKeyFilePassword</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_TRUSTSTORE_FILE_PATH</name>
+		<value>/etc/hadoop/conf/ranger-plugin-truststore.jks</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_TRUSTSTORE_PASSWORD</name>
+		<value>changeit</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>UPDATE_XAPOLICIES_ON_GRANT_REVOKE</name>
+		<value>true</value>
+		<description></description>
+	</property>
+
+</configuration>	

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
new file mode 100644
index 0000000..4b549f9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
@@ -0,0 +1,144 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+
+        <property>
+               <name>ranger-hdfs-plugin-enabled</name>
+               <value>Yes</value>
+               <description>Enable ranger hdfs plugin ?</description>
+        </property>
+
+	<property>
+		<name>REPOSITORY_CONFIG_USERNAME</name>
+		<value>hadoop</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>REPOSITORY_CONFIG_PASSWORD</name>
+		<value>hadoop</value>
+		<property-type>PASSWORD</property-type>
+		<description></description>
+	</property>	
+
+	<property>
+		<name>XAAUDIT.DB.IS_ENABLED</name>
+		<value>true</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.IS_ENABLED</name>
+		<value>false</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINATION_DIRECTORY</name>
+		<value>hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY</name>
+		<value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY</name>
+		<value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_FILE</name>
+		<value>%hostname%-audit.log</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS</name>
+		<value>900</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS</name>
+		<value>86400</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS</name>
+		<value>60</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_FILE</name>
+		<value>%time:yyyyMMdd-HHmm.ss%.log</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS</name>
+		<value>60</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS</name>
+		<value>600</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT</name>
+		<value>10</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_KEYSTORE_FILE_PATH</name>
+		<value>/etc/hadoop/conf/ranger-plugin-keystore.jks</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_KEYSTORE_PASSWORD</name>
+		<value>myKeyFilePassword</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_TRUSTSTORE_FILE_PATH</name>
+		<value>/etc/hadoop/conf/ranger-plugin-truststore.jks</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_TRUSTSTORE_PASSWORD</name>
+		<value>changeit</value>
+		<description></description>
+	</property>
+
+</configuration>	

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/ranger-hive-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/ranger-hive-plugin-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/ranger-hive-plugin-properties.xml
new file mode 100644
index 0000000..6bf38de
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/ranger-hive-plugin-properties.xml
@@ -0,0 +1,150 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+
+        <property>
+                <name>ranger-hive-plugin-enabled</name>
+                <value>Yes</value>
+                <description>Enable ranger hive plugin ?</description>
+        </property>
+
+	<property>
+	        <name>REPOSITORY_CONFIG_USERNAME</name>
+        	<value>hive</value>
+	        <description></description>
+	</property>
+
+	<property>
+        	<name>REPOSITORY_CONFIG_PASSWORD</name>
+	        <value>hive</value>
+	        <property-type>PASSWORD</property-type>
+	        <description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.DB.IS_ENABLED</name>
+		<value>true</value>
+		<description></description>
+	</property>	
+
+	<property>
+		<name>XAAUDIT.HDFS.IS_ENABLED</name>
+		<value>false</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINATION_DIRECTORY</name>
+		<value>hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY</name>
+		<value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY</name>
+		<value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_FILE</name>
+		<value>%hostname%-audit.log</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS</name>
+		<value>900</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS</name>
+		<value>86400</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS</name>
+		<value>60</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_FILE</name>
+		<value>%time:yyyyMMdd-HHmm.ss%.log</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS</name>
+		<value>60</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS</name>
+		<value>600</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT</name>
+		<value>10</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_KEYSTORE_FILE_PATH</name>
+		<value>/etc/hadoop/conf/ranger-plugin-keystore.jks</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_KEYSTORE_PASSWORD</name>
+		<value>myKeyFilePassword</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_TRUSTSTORE_FILE_PATH</name>
+		<value>/etc/hadoop/conf/ranger-plugin-truststore.jks</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_TRUSTSTORE_PASSWORD</name>
+		<value>changeit</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>UPDATE_XAPOLICIES_ON_GRANT_REVOKE</name>
+		<value>true</value>
+		<description></description>
+	</property>	
+
+</configuration>	

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/stacks/HDP/2.2/services/RANGER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/RANGER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/RANGER/metainfo.xml
new file mode 100644
index 0000000..5f91087
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/RANGER/metainfo.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+            <name>RANGER</name>
+            <extends>common-services/RANGER/0.4.0</extends>		
+        </service>
+    </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json b/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json
index 05aba97..b1f5d73 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json
@@ -9,7 +9,7 @@
                 "dfs.support.append": "true", 
                 "dfs.namenode.http-address": "true"
             }
-        }, 
+        },
         "yarn-log4j": {}, 
         "hadoop-policy": {}, 
         "hdfs-log4j": {}, 
@@ -326,7 +326,13 @@
             "hadoop.proxyuser.oozie.hosts": "c6402.ambari.apache.org", 
             "ipc.client.connection.maxidletime": "30000", 
             "ipc.client.connect.max.retries": "50"
-        }, 
+        },
+        "ranger-hdfs-plugin-properties" : {
+            "ranger-hdfs-plugin-enabled":"yes"
+        },
+        "ranger-hbase-plugin-properties" : {
+            "ranger-hbase-plugin-enabled":"yes"
+        },
         "yarn-env": {
             "yarn_pid_dir_prefix": "/var/run/hadoop-yarn", 
             "apptimelineserver_heapsize": "1024", 
@@ -521,7 +527,8 @@
             "content": "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\n# export HBASE_HEAPSIZE=1000\n\n# Extra Java runtime options.\n# Below are what we set by default. May only work with SUN JVM.\n# For more on why as well as other possible settings,\n# see http://wiki.apache.org/hadoop/PerformanceTuning\nexport SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable 
 JMX exporting\n# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\n# If you want to configure BucketCache, specify '-XX: MaxDirectMemorySize=' with proper direct memory size\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# Extra ssh options. Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by default.\nexport HBASE_LOG_DIR={{l
 og_dir}}\n\n# A string representing this instance of hbase. $USER by default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The directory where pid files are stored. /tmp by default.\nexport HBASE_PID_DIR={{pid_dir}}\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}}\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}}\"\nexport HBASE_REGIONSERV
 ER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n{% else %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}}\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n{% endif %}", 
             "hbase_regionserver_heapsize": "1024m", 
             "hbase_log_dir": "/var/log/hbase"
-        }, 
+        },
+
         "zoo.cfg": {
             "clientPort": "2181", 
             "autopurge.purgeInterval": "24", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
index 82b485b..a70e90e 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
@@ -371,7 +371,13 @@
             "hive.server2.enable.doAs": "true",
             "hive.server2.authentication": "NOSASL",
             "hive.optimize.mapjoin.mapreduce": "true"
-        }, 
+        },
+        "ranger-hive-plugin-properties": {
+            "ranger-hive-plugin-enabled":"yes"
+        },
+        "ranger-knox-plugin-properties": {
+            "ranger-knox-plugin-enabled":"yes"
+        },
         "yarn-site": {
             "yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25", 
             "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
index f544b88..b0c962f 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
@@ -330,7 +330,10 @@
             "hadoop.proxyuser.hcat.groups": "users", 
             "ipc.client.connection.maxidletime": "30000", 
             "ipc.client.connect.max.retries": "50"
-        }, 
+        },
+        "ranger-hive-plugin-properties" : {
+            "ranger-hive-plugin-enabled":"yes"
+        },
         "hive-site": {
             "hive.enforce.sorting": "true", 
             "javax.jdo.option.ConnectionPassword": "!`\"' 1", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
index 253747a..fdef520 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
@@ -227,7 +227,10 @@
             "ha.zookeeper.quorum": "c6401.ambari.apache.org:2181,c6402.ambari.apache.org:2181,c6403.ambari.apache.org:2181", 
             "ipc.client.connection.maxidletime": "30000", 
             "hadoop.security.auth_to_local": "\n        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n        RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n        RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n        RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n        DEFAULT"
-        }, 
+        },
+        "ranger-hdfs-plugin-properties" : {
+            "ranger-hdfs-plugin-enabled":"yes"
+        },
         "hdfs-log4j": {
             "log4j.appender.DRFA.layout": "org.apache.log4j.PatternLayout", 
             "log4j.appender.DRFA.layout.ConversionPattern": "%d{ISO8601} %p %c: %m%n", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json
index 8e6b3d4..99fe020 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json
@@ -532,7 +532,10 @@
             "content": "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\n# export HBASE_HEAPSIZE=1000\n\n# Extra Java runtime options.\n# Below are what we set by default. May only work with SUN JVM.\n# For more on why as well as other possible settings,\n# see http://wiki.apache.org/hadoop/PerformanceTuning\nexport SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable 
 JMX exporting\n# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\n# If you want to configure BucketCache, specify '-XX: MaxDirectMemorySize=' with proper direct memory size\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# Extra ssh options. Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by default.\nexport HBASE_LOG_DIR={{l
 og_dir}}\n\n# A string representing this instance of hbase. $USER by default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The directory where pid files are stored. /tmp by default.\nexport HBASE_PID_DIR={{pid_dir}}\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}}\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}}\"\nexport HBASE_REGIONSERV
 ER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n{% else %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}}\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n{% endif %}", 
             "hbase_regionserver_heapsize": "1024m", 
             "hbase_log_dir": "/var/log/hbase"
-        }, 
+        },
+        "ranger-hbase-plugin-properties": {
+            "ranger-hbase-plugin-enabled":"yes"
+        },
         "ganglia-env": {
             "gmond_user": "nobody", 
             "ganglia_runtime_dir": "/var/run/ganglia/hdp", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json
index 410e70e..f48863b 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json
@@ -527,7 +527,10 @@
             "content": "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\n# export HBASE_HEAPSIZE=1000\n\n# Extra Java runtime options.\n# Below are what we set by default. May only work with SUN JVM.\n# For more on why as well as other possible settings,\n# see http://wiki.apache.org/hadoop/PerformanceTuning\nexport SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable 
 JMX exporting\n# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\n# If you want to configure BucketCache, specify '-XX: MaxDirectMemorySize=' with proper direct memory size\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# Extra ssh options. Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by default.\nexport HBASE_LOG_DIR={{l
 og_dir}}\n\n# A string representing this instance of hbase. $USER by default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The directory where pid files are stored. /tmp by default.\nexport HBASE_PID_DIR={{pid_dir}}\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}}\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}}\"\nexport HBASE_REGIONSERV
 ER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n{% else %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}}\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n{% endif %}", 
             "hbase_regionserver_heapsize": "1024m", 
             "hbase_log_dir": "/var/log/hbase"
-        }, 
+        },
+        "ranger-hbase-plugin-properties": {
+            "ranger-hbase-plugin-enabled":"yes"
+        },
         "ganglia-env": {
             "gmond_user": "nobody", 
             "ganglia_runtime_dir": "/var/run/ganglia/hdp", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-preupgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-preupgrade.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-preupgrade.json
index ee46527..1a1af3e 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-preupgrade.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-preupgrade.json
@@ -81,7 +81,10 @@
             "content": "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\n# export HBASE_HEAPSIZE=1000\n\n# Extra Java runtime options.\n# Below are what we set by default. May only work with SUN JVM.\n# For more on why as well as other possible settings,\n# see http://wiki.apache.org/hadoop/PerformanceTuning\nexport SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable 
 JMX exporting\n# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\n# If you want to configure BucketCache, specify '-XX: MaxDirectMemorySize=' with proper direct memory size\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# Extra ssh options. Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by default.\nexport HBASE_LOG_DIR={{l
 og_dir}}\n\n# A string representing this instance of hbase. $USER by default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The directory where pid files are stored. /tmp by default.\nexport HBASE_PID_DIR={{pid_dir}}\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}}\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}}\"\nexport HBASE_REGIONSERV
 ER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n{% else %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}}\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n{% endif %}", 
             "hbase_regionserver_heapsize": "1024m", 
             "hbase_log_dir": "/var/log/hbase"
-        }, 
+        },
+        "ranger-hbase-plugin-properties": {
+            "ranger-hbase-plugin-enabled":"yes"
+        },
         "cluster-env": {
             "security_enabled": "false", 
             "hive_tar_source": "/usr/hdp/current/hive-client/hive.tar.gz", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json
index 89face6..4cf5cd7 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json
@@ -532,7 +532,10 @@
             "content": "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\n# export HBASE_HEAPSIZE=1000\n\n# Extra Java runtime options.\n# Below are what we set by default. May only work with SUN JVM.\n# For more on why as well as other possible settings,\n# see http://wiki.apache.org/hadoop/PerformanceTuning\nexport SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable 
 JMX exporting\n# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\n# If you want to configure BucketCache, specify '-XX: MaxDirectMemorySize=' with proper direct memory size\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# Extra ssh options. Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by default.\nexport HBASE_LOG_DIR={{l
 og_dir}}\n\n# A string representing this instance of hbase. $USER by default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The directory where pid files are stored. /tmp by default.\nexport HBASE_PID_DIR={{pid_dir}}\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}}\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}}\"\nexport HBASE_REGIONSERV
 ER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n{% else %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}}\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n{% endif %}", 
             "hbase_regionserver_heapsize": "1024m", 
             "hbase_log_dir": "/var/log/hbase"
-        }, 
+        },
+        "ranger-hbase-plugin-properties": {
+            "ranger-hbase-plugin-enabled":"yes"
+        },
         "ganglia-env": {
             "gmond_user": "nobody", 
             "ganglia_runtime_dir": "/var/run/ganglia/hdp", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
index d48b0ab..8e93823 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
@@ -367,7 +367,13 @@
             "hadoop.security.auth_to_local": "RULE:[2:$1@$0](rm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](dn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](hbase@.*EXAMPLE.COM)s/.*/hbase/\nRULE:[2:$1@$0](hbase@.*EXAMPLE.COM)s/.*/hbase/\nRULE:[2:$1@$0](oozie@.*EXAMPLE.COM)s/.*/oozie/\nRULE:[2:$1@$0](jhs@.*EXAMPLE.COM)s/.*/mapred/\nRULE:[2:$1@$0](jn/_HOST@.*EXAMPLE.COM)s/.*/hdfs/\nDEFAULT", 
             "hadoop.proxyuser.oozie.hosts": "c6402.ambari.apache.org", 
             "ipc.client.connection.maxidletime": "30000"
-        }, 
+        },
+        "ranger-hdfs-plugin-properties" : {
+            "ranger-hdfs-plugin-enabled":"yes"
+        },
+        "ranger-hive-plugin-properties" : {
+            "ranger-hive-plugin-enabled":"yes"
+        },
         "hive-site": {
             "hive.enforce.sorting": "true", 
             "javax.jdo.option.ConnectionPassword": "!`\"' 1", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json b/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
index 69c7b4d..6714686 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
@@ -365,7 +365,10 @@
             "hadoop.security.auth_to_local": "RULE:[2:$1@$0](rm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nm@.*EXAMPLE.COM)s/.*/yarn/\nRULE:[2:$1@$0](nn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](dn@.*EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](hbase@.*EXAMPLE.COM)s/.*/hbase/\nRULE:[2:$1@$0](hbase@.*EXAMPLE.COM)s/.*/hbase/\nRULE:[2:$1@$0](oozie@.*EXAMPLE.COM)s/.*/oozie/\nRULE:[2:$1@$0](jhs@.*EXAMPLE.COM)s/.*/mapred/\nRULE:[2:$1@$0](jn/_HOST@.*EXAMPLE.COM)s/.*/hdfs/\nDEFAULT", 
             "hadoop.proxyuser.oozie.hosts": "c6402.ambari.apache.org", 
             "ipc.client.connection.maxidletime": "30000"
-        }, 
+        },
+        "ranger-hive-plugin-properties" : {
+            "ranger-hive-plugin-enabled":"yes"
+        },
         "hive-site": {
             "hive.enforce.sorting": "true", 
             "javax.jdo.option.ConnectionPassword": "!`\"' 1", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/test/python/stacks/2.2/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/default.json b/ambari-server/src/test/python/stacks/2.2/configs/default.json
index 28a17ae..d37011d 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/default.json
@@ -149,8 +149,13 @@
         "kafka.ganglia.metrics.port": "8649",
         "log.index.interval.bytes": "4096",
         "log.retention.hours": "168"
-      }
-
+      },
+      "ranger-hbase-plugin-properties": {
+            "ranger-hbase-plugin-enabled":"yes"
+      },
+      "ranger-hive-plugin-properties": {
+            "ranger-hive-plugin-enabled":"yes"
+      }
 
     },
     "configuration_attributes": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json b/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json
index 35aedc0..8783b64 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json
@@ -312,6 +312,11 @@
             "hadoop.security.auth_to_local": "\n        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n        RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n        RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n        RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n        DEFAULT",
             "ipc.client.connection.maxidletime": "30000"
         },
+
+        "ranger-hive-plugin-properties" : {
+            "ranger-hive-plugin-enabled":"yes"
+        },
+
         "hadoop-env": {
             "dtnode_heapsize": "1024m",
             "namenode_opt_maxnewsize": "200m",

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-web/app/config.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/config.js b/ambari-web/app/config.js
index 8e2e562..a22430a 100644
--- a/ambari-web/app/config.js
+++ b/ambari-web/app/config.js
@@ -61,6 +61,7 @@ App.supports = {
   autoRollbackHA: false,
   alwaysEnableManagedMySQLForHive: false,
   automatedKerberos: false,
+  ranger: false,
   customizeAgentUserAccount: false,
   installGanglia: false
 };


[2/2] ambari git commit: Revert "Revert "AMBARI-8949. Support Ranger installation via Ambari. (gautam borad via jaimin)""

Posted by ma...@apache.org.
Revert "Revert "AMBARI-8949. Support Ranger installation via Ambari. (gautam borad via jaimin)""

This reverts commit 01b3af1b7d191019aafc0d8d670941548a7d4b14.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/31cdf9fa
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/31cdf9fa
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/31cdf9fa

Branch: refs/heads/trunk
Commit: 31cdf9fab34c07f8a1f1f5f6b97de432ea55fc44
Parents: 01b3af1
Author: Mahadev Konar <ma...@apache.org>
Authored: Sun Jan 11 20:37:05 2015 -0800
Committer: Mahadev Konar <ma...@apache.org>
Committed: Sun Jan 11 20:37:05 2015 -0800

----------------------------------------------------------------------
 .../libraries/functions/ranger_admin.py         | 119 ++++++++++++
 .../HBASE/0.96.0.2.0/metainfo.xml               |   1 +
 .../0.96.0.2.0/package/scripts/hbase_master.py  |   7 +-
 .../HBASE/0.96.0.2.0/package/scripts/params.py  |  13 ++
 .../package/scripts/setup_ranger_hbase.py       | 184 ++++++++++++++++++
 .../common-services/HDFS/2.1.0.2.0/metainfo.xml |   1 +
 .../HDFS/2.1.0.2.0/package/scripts/namenode.py  |   4 +-
 .../HDFS/2.1.0.2.0/package/scripts/params.py    |  14 ++
 .../package/scripts/setup_ranger_hdfs.py        | 186 +++++++++++++++++++
 .../HIVE/0.12.0.2.0/metainfo.xml                |   1 +
 .../0.12.0.2.0/package/scripts/hive_server.py   |   5 +-
 .../HIVE/0.12.0.2.0/package/scripts/params.py   |  12 ++
 .../package/scripts/setup_ranger_hive.py        | 182 ++++++++++++++++++
 .../ranger-knox-plugin-properties.xml           | 157 ++++++++++++++++
 .../common-services/KNOX/0.5.0.2.2/metainfo.xml |   1 +
 .../0.5.0.2.2/package/scripts/knox_gateway.py   |   3 +
 .../KNOX/0.5.0.2.2/package/scripts/params.py    |  11 ++
 .../package/scripts/setup_ranger_knox.py        | 184 ++++++++++++++++++
 .../0.4.0/configuration/admin-properties.xml    | 180 ++++++++++++++++++
 .../RANGER/0.4.0/configuration/ranger-env.xml   |  49 +++++
 .../0.4.0/configuration/usersync-properties.xml | 103 ++++++++++
 .../common-services/RANGER/0.4.0/metainfo.xml   |  78 ++++++++
 .../RANGER/0.4.0/package/scripts/params.py      |  44 +++++
 .../0.4.0/package/scripts/ranger_admin.py       |  49 +++++
 .../0.4.0/package/scripts/ranger_usersync.py    |  47 +++++
 .../0.4.0/package/scripts/setup_ranger.py       | 132 +++++++++++++
 .../stacks/HDP/2.2/role_command_order.json      |  13 ++
 .../ranger-hbase-plugin-properties.xml          | 150 +++++++++++++++
 .../ranger-hdfs-plugin-properties.xml           | 144 ++++++++++++++
 .../ranger-hive-plugin-properties.xml           | 150 +++++++++++++++
 .../stacks/HDP/2.2/services/RANGER/metainfo.xml |  29 +++
 .../stacks/2.0.6/configs/client-upgrade.json    |  13 +-
 .../python/stacks/2.0.6/configs/default.json    |   8 +-
 .../stacks/2.0.6/configs/default_client.json    |   5 +-
 .../python/stacks/2.0.6/configs/ha_default.json |   5 +-
 .../python/stacks/2.0.6/configs/hbase-2.2.json  |   5 +-
 .../stacks/2.0.6/configs/hbase-check-2.2.json   |   5 +-
 .../stacks/2.0.6/configs/hbase-preupgrade.json  |   5 +-
 .../stacks/2.0.6/configs/hbase-rs-2.2.json      |   5 +-
 .../python/stacks/2.0.6/configs/secured.json    |   8 +-
 .../stacks/2.0.6/configs/secured_client.json    |   5 +-
 .../test/python/stacks/2.2/configs/default.json |   9 +-
 .../python/stacks/2.2/configs/hive-upgrade.json |   5 +
 ambari-web/app/config.js                        |   1 +
 44 files changed, 2312 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-common/src/main/python/resource_management/libraries/functions/ranger_admin.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/ranger_admin.py b/ambari-common/src/main/python/resource_management/libraries/functions/ranger_admin.py
new file mode 100644
index 0000000..98509d8
--- /dev/null
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/ranger_admin.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import time
+import pycurl
+import sys
+from StringIO import StringIO as BytesIO
+import json
+from resource_management.core.logger import Logger
+
+class Rangeradmin:
+  sInstance = None
+  def __init__(self, url= 'http://localhost:6080'):
+    
+    self.baseUrl      =  url 
+    self.urlLogin     = self.baseUrl + '/login.jsp'
+    self.urlLoginPost = self.baseUrl + '/j_spring_security_check'
+    self.urlRepos     = self.baseUrl + '/service/assets/assets'
+    self.urlReposPub  = self.baseUrl + '/service/public/api/repository'
+    self.urlPolicies  = self.baseUrl + '/service/assets/resources'
+    self.urlGroups    = self.baseUrl + '/service/xusers/groups'
+    self.urlUsers     = self.baseUrl + '/service/xusers/users'   
+    self.urlSecUsers  = self.baseUrl + '/service/xusers/secure/users'   
+
+    self.session    = None
+    self.isLoggedIn = False
+
+  def get_repository_by_name_pycurl(self, name, component, status, usernamepassword):
+    searchRepoURL = self.urlReposPub + "?name=" + name + "&type=" + component + "&status=" + status
+    responseCode, response = self.call_pycurl_request(url = searchRepoURL,data='',method='get',usernamepassword=usernamepassword)
+
+    if response is None:
+      return None
+    elif responseCode == 200: 
+      repos = json.loads(response)
+      if repos is not None and len(repos['vXRepositories']) > 0:
+        for repo in repos['vXRepositories']:
+          repoDump = json.loads(json.JSONEncoder().encode(repo))
+          if repoDump['name'] == name:
+            return repoDump
+        return None            
+    else:
+      Logger.error('Error occurred while creating repository')
+      return None
+
+  def create_repository_pycurl(self, data, usernamepassword):
+    searchRepoURL = self.urlReposPub
+    responseCode, response = self.call_pycurl_request(url =searchRepoURL, data=data, method='post', usernamepassword=usernamepassword)
+
+    if response is None:
+      return None
+    elif responseCode != 200:
+      Logger.info('Request for repository is not saved ,response is : %s', response)
+    elif responseCode == 200:
+      Logger.info('Repository created Successfully')
+      return response
+    else:
+      return None  
+
+  def call_pycurl_request(self, url, data, method, usernamepassword):
+    buffer = BytesIO()
+    header = BytesIO()
+    url = str(url)
+    # Creating PyCurl Requests
+    c = pycurl.Curl()
+    c.setopt(pycurl.URL,url)
+    c.setopt(pycurl.HTTPHEADER, ['Content-Type: application/json','Accept: application/json'])
+    c.setopt(pycurl.USERPWD, usernamepassword)
+    c.setopt(pycurl.VERBOSE, 0)
+    c.setopt(pycurl.WRITEFUNCTION ,buffer.write )
+    c.setopt(pycurl.HEADERFUNCTION,header.write)
+    c.setopt(pycurl.CONNECTTIMEOUT, 60)
+    # setting proper method and parameters
+    if method == 'get':
+      c.setopt(pycurl.HTTPGET, 1)
+    elif method == 'post':
+      c.setopt(pycurl.POST, 1)
+      c.setopt(pycurl.POSTFIELDS, data)
+    elif method == 'put':
+      c.setopt(pycurl.CUSTOMREQUEST, "PUT")
+      c.setopt(pycurl.POSTFIELDS, str(data))
+    elif method == 'delete':
+      c.setopt(pycurl.CUSTOMREQUEST, "DELETE")
+      c.setopt(pycurl.POSTFIELDS, str(data))
+    else:
+      Logger.error('Invalid option given for curl request')
+    
+    try:
+      # making request
+      c.perform()
+      # getting response
+      responseCode = c.getinfo(pycurl.HTTP_CODE)
+      response = buffer.getvalue()
+      headerResponse = header.getvalue()
+      c.close()
+      buffer.close()
+      header.close()
+      return responseCode, response
+    except Exception, e:
+        Logger.error(str(e))
+        if c is not None:
+          c.close()		 
+    return None, None

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metainfo.xml
index 18572ef..c00889e 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metainfo.xml
@@ -137,6 +137,7 @@
         <config-type>hbase-site</config-type>
         <config-type>hbase-env</config-type>
         <config-type>hbase-log4j</config-type>
+        <config-type>ranger-hbase-plugin-properties</config-type>
       </configuration-dependencies>
 
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
index d54ecb9..570b124 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
@@ -27,7 +27,7 @@ from hbase import hbase
 from hbase_service import hbase_service
 from hbase_decommission import hbase_decommission
 import upgrade
-
+from setup_ranger_hbase import setup_ranger_hbase
          
 class HbaseMaster(Script):
 
@@ -36,6 +36,7 @@ class HbaseMaster(Script):
 
   def install(self, env):
     self.install_packages(env)
+    setup_ranger_hbase(env)
     
   def configure(self, env):
     import params
@@ -52,11 +53,11 @@ class HbaseMaster(Script):
     import params
     env.set_params(params)
     self.configure(env) # for security
-
+    
     hbase_service( 'master',
       action = 'start'
     )
-
+    setup_ranger_hbase(env)
     self.save_component_version_to_structured_out(params.stack_name)
     
   def stop(self, env, rolling_restart=False):

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params.py
index ace3901..00d9ac6 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params.py
@@ -162,3 +162,16 @@ if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
     region_mover = format("/usr/hdp/current/hbase-{role_root}/bin/region_mover.rb")
     region_drainer = format("/usr/hdp/current/hbase-{role_root}/bin/draining_servers.rb")
     hbase_cmd = format("/usr/hdp/current/hbase-{role_root}/bin/hbase")
+
+if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+  # Setting Flag value for ranger hbase plugin
+  enable_ranger_hbase = False
+  user_input = config['configurations']['ranger-hbase-plugin-properties']['ranger-hbase-plugin-enabled']
+  if user_input.lower() == 'yes':
+    enable_ranger_hbase = True
+  elif user_input.lower() == 'no':
+    enable_ranger_hbase = False
+
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = not len(ranger_admin_hosts) == 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/setup_ranger_hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/setup_ranger_hbase.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/setup_ranger_hbase.py
new file mode 100644
index 0000000..21dfbc8
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/setup_ranger_hbase.py
@@ -0,0 +1,184 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import sys
+import fileinput
+import subprocess
+import json
+import re
+from resource_management import *
+from resource_management.libraries.functions.ranger_admin import Rangeradmin
+from resource_management.core.logger import Logger
+
+def setup_ranger_hbase(env):
+    import params
+    env.set_params(params)
+
+    if params.has_ranger_admin:
+        try:
+            command = 'hdp-select status hbase-client'
+            return_code, hdp_output = shell.call(command, timeout=20)
+        except Exception, e:
+            Logger.error(str(e))
+            raise Fail('Unable to execute hdp-select command to retrieve the version.')
+
+        if return_code != 0:
+            raise Fail('Unable to determine the current version because of a non-zero return code of {0}'.format(str(return_code)))
+
+        hdp_version = re.sub('hbase-client - ', '', hdp_output)
+        match = re.match('[0-9]+.[0-9]+.[0-9]+.[0-9]+-[0-9]+', hdp_version)
+
+        if match is None:
+            raise Fail('Failed to get extracted version')
+
+        file_path = '/usr/hdp/'+ hdp_version +'/ranger-hbase-plugin/install.properties'
+
+        ranger_hbase_dict = ranger_hbase_properties(params)
+        hbase_repo_data = hbase_repo_properties(params)
+
+        write_properties_to_file(file_path, ranger_hbase_dict)
+
+        if params.enable_ranger_hbase:
+            cmd = format('cd /usr/hdp/{hdp_version}/ranger-hbase-plugin/ && sh enable-hbase-plugin.sh')
+            ranger_adm_obj = Rangeradmin(url=ranger_hbase_dict['POLICY_MGR_URL'])
+            response_code, response_recieved = ranger_adm_obj.call_pycurl_request(ranger_hbase_dict['POLICY_MGR_URL'] + '/login.jsp', '', 'get', 'test:test')
+
+            if response_code is not None and response_code == 200:
+                repo = ranger_adm_obj.get_repository_by_name_pycurl(ranger_hbase_dict['REPOSITORY_NAME'], 'hbase', 'true', 'admin:admin')
+
+                if repo and repo['name'] == ranger_hbase_dict['REPOSITORY_NAME']:
+                    Logger.info('Hbase Repository exist')
+                else:
+                    response = ranger_adm_obj.create_repository_pycurl(hbase_repo_data, 'admin:admin')
+                    if response is not None:
+                        Logger.info('Hbase Repository created')
+                    else:
+                        Logger.info('Hbase Repository creation failed')
+            else:
+                Logger.info('Ranger service is not started on given host')
+        else:
+            cmd = format('cd /usr/hdp/{hdp_version}/ranger-hbase-plugin/ && sh disable-hbase-plugin.sh')
+
+        Execute(cmd, environment={'JAVA_HOME': params.java64_home}, logoutput=True)                    
+    else:
+        Logger.info('Ranger admin not installed')
+
+
+def write_properties_to_file(file_path, value):
+    for key in value:
+      modify_config(file_path, key, value[key])
+
+
+def modify_config(filepath, variable, setting):
+    var_found = False
+    already_set = False
+    V=str(variable)
+    S=str(setting)
+    # use quotes if setting has spaces #
+    if ' ' in S:
+        S = '%s' % S
+
+    for line in fileinput.input(filepath, inplace = 1):
+        # process lines that look like config settings #
+        if not line.lstrip(' ').startswith('#') and '=' in line:
+            _infile_var = str(line.split('=')[0].rstrip(' '))
+            _infile_set = str(line.split('=')[1].lstrip(' ').rstrip())
+            # only change the first matching occurrence #
+            if var_found == False and _infile_var.rstrip(' ') == V:
+                var_found = True
+                # don't change it if it is already set #
+                if _infile_set.lstrip(' ') == S:
+                    already_set = True
+                else:
+                    line = "%s=%s\n" % (V, S)
+
+        sys.stdout.write(line)
+
+    # Append the variable if it wasn't found #
+    if not var_found:
+        with open(filepath, "a") as f:
+            f.write("%s=%s\n" % (V, S))
+    elif already_set == True:
+        pass
+    else:
+        pass
+
+    return
+
+def ranger_hbase_properties(params):
+    ranger_hbase_properties = dict()
+
+    ranger_hbase_properties['POLICY_MGR_URL']           = params.config['configurations']['admin-properties']['policymgr_external_url']
+    ranger_hbase_properties['SQL_CONNECTOR_JAR']        = params.config['configurations']['admin-properties']['SQL_CONNECTOR_JAR']
+    ranger_hbase_properties['XAAUDIT.DB.FLAVOUR']       = params.config['configurations']['admin-properties']['DB_FLAVOR']
+    ranger_hbase_properties['XAAUDIT.DB.DATABASE_NAME'] = params.config['configurations']['admin-properties']['audit_db_name']
+    ranger_hbase_properties['XAAUDIT.DB.USER_NAME']     = params.config['configurations']['admin-properties']['audit_db_user']
+    ranger_hbase_properties['XAAUDIT.DB.PASSWORD']      = params.config['configurations']['admin-properties']['audit_db_password']
+    ranger_hbase_properties['XAAUDIT.DB.HOSTNAME']      = params.config['configurations']['admin-properties']['db_host']
+    ranger_hbase_properties['REPOSITORY_NAME']          = params.config['clusterName'] + '_hbase'
+
+    ranger_hbase_properties['XAAUDIT.DB.IS_ENABLED']   = params.config['configurations']['ranger-hbase-plugin-properties']['XAAUDIT.DB.IS_ENABLED']
+
+    ranger_hbase_properties['XAAUDIT.HDFS.IS_ENABLED'] = params.config['configurations']['ranger-hbase-plugin-properties']['XAAUDIT.HDFS.IS_ENABLED']
+    ranger_hbase_properties['XAAUDIT.HDFS.DESTINATION_DIRECTORY'] = params.config['configurations']['ranger-hbase-plugin-properties']['XAAUDIT.HDFS.DESTINATION_DIRECTORY']
+    ranger_hbase_properties['XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY'] = params.config['configurations']['ranger-hbase-plugin-properties']['XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY']
+    ranger_hbase_properties['XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY'] = params.config['configurations']['ranger-hbase-plugin-properties']['XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY']
+    ranger_hbase_properties['XAAUDIT.HDFS.DESTINTATION_FILE'] = params.config['configurations']['ranger-hbase-plugin-properties']['XAAUDIT.HDFS.DESTINTATION_FILE']
+    ranger_hbase_properties['XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS'] = params.config['configurations']['ranger-hbase-plugin-properties']['XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS']
+    ranger_hbase_properties['XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS'] = params.config['configurations']['ranger-hbase-plugin-properties']['XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS']
+    ranger_hbase_properties['XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS'] = params.config['configurations']['ranger-hbase-plugin-properties']['XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS']
+    ranger_hbase_properties['XAAUDIT.HDFS.LOCAL_BUFFER_FILE'] = params.config['configurations']['ranger-hbase-plugin-properties']['XAAUDIT.HDFS.LOCAL_BUFFER_FILE']
+    ranger_hbase_properties['XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS'] = params.config['configurations']['ranger-hbase-plugin-properties']['XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS']
+    ranger_hbase_properties['XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS'] = params.config['configurations']['ranger-hbase-plugin-properties']['XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS']
+    ranger_hbase_properties['XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT'] = params.config['configurations']['ranger-hbase-plugin-properties']['XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT']
+    
+
+    ranger_hbase_properties['SSL_KEYSTORE_FILE_PATH'] = params.config['configurations']['ranger-hbase-plugin-properties']['SSL_KEYSTORE_FILE_PATH']
+    ranger_hbase_properties['SSL_KEYSTORE_PASSWORD'] = params.config['configurations']['ranger-hbase-plugin-properties']['SSL_KEYSTORE_PASSWORD']
+    ranger_hbase_properties['SSL_TRUSTSTORE_FILE_PATH'] = params.config['configurations']['ranger-hbase-plugin-properties']['SSL_TRUSTSTORE_FILE_PATH']
+    ranger_hbase_properties['SSL_TRUSTSTORE_PASSWORD'] = params.config['configurations']['ranger-hbase-plugin-properties']['SSL_TRUSTSTORE_PASSWORD']
+    
+    ranger_hbase_properties['UPDATE_XAPOLICIES_ON_GRANT_REVOKE'] = params.config['configurations']['ranger-hbase-plugin-properties']['UPDATE_XAPOLICIES_ON_GRANT_REVOKE']
+
+    return ranger_hbase_properties    
+
+def hbase_repo_properties(params):
+
+    config_dict = dict()
+    config_dict['username'] = params.config['configurations']['ranger-hbase-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
+    config_dict['password'] = params.config['configurations']['ranger-hbase-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']
+    config_dict['hadoop.security.authentication'] = params.config['configurations']['core-site']['hadoop.security.authentication']
+    config_dict['hbase.master.kerberos.principal'] = ''
+    config_dict['hbase.security.authentication'] = params.config['configurations']['hbase-site']['hbase.security.authentication']
+    config_dict['hbase.zookeeper.property.clientPort'] = params.config['configurations']['hbase-site']['hbase.zookeeper.property.clientPort']
+    config_dict['hbase.zookeeper.quorum'] = params.config['configurations']['hbase-site']['hbase.zookeeper.quorum']
+    config_dict['zookeeper.znode.parent'] =  params.config['configurations']['hbase-site']['zookeeper.znode.parent']
+
+
+    repo= dict()
+    repo['isActive']                = "true"
+    repo['config']                  = json.dumps(config_dict)
+    repo['description']             = "hbase repo"
+    repo['name']                    = params.config['clusterName'] + "_hbase"
+    repo['repositoryType']          = "Hbase"
+    repo['assetType']               = '2'
+
+    data = json.dumps(repo)
+
+    return data

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml
index ce0ab29..9ec5fbc 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml
@@ -219,6 +219,7 @@
         <config-type>hadoop-env</config-type>
         <config-type>hadoop-policy</config-type>
         <config-type>hdfs-log4j</config-type>
+        <config-type>ranger-hdfs-plugin-properties</config-type>
       </configuration-dependencies>
     </service>
   </services>

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
index 7b171a8..6b88acb 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
@@ -36,7 +36,7 @@ from hdfs_namenode import namenode
 from hdfs import hdfs
 import hdfs_rebalance
 from utils import failover_namenode
-
+from setup_ranger_hdfs import setup_ranger_hdfs
 
 class NameNode(Script):
 
@@ -50,6 +50,7 @@ class NameNode(Script):
     env.set_params(params)
     #TODO we need this for HA because of manual steps
     self.configure(env)
+    setup_ranger_hdfs(env)
 
   def prepare_rolling_upgrade(self, env):
     namenode_upgrade.prepare_rolling_upgrade()
@@ -70,6 +71,7 @@ class NameNode(Script):
 
     env.set_params(params)
     self.configure(env)
+    setup_ranger_hdfs(env)
     namenode(action="start", rolling_restart=rolling_restart, env=env)
 
     self.save_component_version_to_structured_out(params.stack_name)

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
index d844ecd..2201510 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
@@ -295,3 +295,17 @@ ttnode_heapsize = "1024m"
 dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
 mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
 mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+
+
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = not len(ranger_admin_hosts) == 0
+
+if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+    # setting flag value for ranger hdfs plugin
+    enable_ranger_hdfs = False
+    user_input = config['configurations']['ranger-hdfs-plugin-properties']['ranger-hdfs-plugin-enabled']
+    if  user_input.lower() == 'yes':
+      enable_ranger_hdfs = True
+    elif user_input.lower() == 'no':
+      enable_ranger_hdfs = False

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
new file mode 100644
index 0000000..e16e90e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
@@ -0,0 +1,186 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import sys
+import fileinput
+import subprocess
+import json
+import re
+from resource_management import *
+from resource_management.libraries.functions.ranger_admin import Rangeradmin
+from resource_management.core.logger import Logger
+
def setup_ranger_hdfs(env):
    """Wire up the Ranger HDFS plugin on this host, if a Ranger admin exists.

    Steps: resolve the active HDP version via ``hdp-select``, rewrite the
    plugin's install.properties from cluster configuration, ensure the
    cluster's HDFS repository exists in Ranger admin, then run the
    enable- or disable-plugin script per ``params.enable_ranger_hdfs``.

    Raises:
        Fail: when hdp-select cannot be executed, returns non-zero, or its
              output does not contain a parseable version.
    """
    import params
    env.set_params(params)

    if not params.has_ranger_admin:
        Logger.info('Ranger admin not installed')
        return

    try:
        return_code, hdp_output = shell.call('hdp-select status hadoop-client', timeout=20)
    except Exception as e:
        Logger.error(str(e))
        raise Fail('Unable to execute hdp-select command to retrieve the version.')

    if return_code != 0:
        raise Fail('Unable to determine the current version because of a non-zero return code of {0}'.format(str(return_code)))

    hdp_version = re.sub('hadoop-client - ', '', hdp_output)
    # Dots are escaped now; the previous pattern's bare '.' matched any character.
    match = re.match(r'[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+-[0-9]+', hdp_version)

    if match is None:
        raise Fail('Failed to get extracted version')

    file_path = '/usr/hdp/' + hdp_version + '/ranger-hdfs-plugin/install.properties'

    ranger_hdfs_dict = ranger_hdfs_properties(params)
    hdfs_repo_data = hdfs_repo_properties(params)

    write_properties_to_file(file_path, ranger_hdfs_dict)

    if params.enable_ranger_hdfs:
        cmd = format('cd /usr/hdp/{hdp_version}/ranger-hdfs-plugin/ && sh enable-hdfs-plugin.sh')
        ranger_adm_obj = Rangeradmin(url=ranger_hdfs_dict['POLICY_MGR_URL'])
        # NOTE(review): probe/admin credentials are hard-coded ('test:test',
        # 'admin:admin'); they should come from configuration. Flagged only.
        response_code, response_received = ranger_adm_obj.call_pycurl_request(
            ranger_hdfs_dict['POLICY_MGR_URL'] + '/login.jsp', '', 'get', 'test:test')

        if response_code is not None and response_code == 200:
            repo = ranger_adm_obj.get_repository_by_name_pycurl(
                ranger_hdfs_dict['REPOSITORY_NAME'], 'hdfs', 'true', 'admin:admin')

            if repo and repo['name'] == ranger_hdfs_dict['REPOSITORY_NAME']:
                Logger.info('HDFS Repository exist')
            else:
                response = ranger_adm_obj.create_repository_pycurl(hdfs_repo_data, 'admin:admin')
                if response is not None:
                    Logger.info('HDFS Repository created')
                else:
                    Logger.info('HDFS Repository creation failed')
        else:
            Logger.info('Ranger service is not started on given host')
    else:
        cmd = format('cd /usr/hdp/{hdp_version}/ranger-hdfs-plugin/ && sh disable-hdfs-plugin.sh')

    Execute(cmd, environment={'JAVA_HOME': params.java_home}, logoutput=True)
+
+
def write_properties_to_file(file_path, value):
    """Apply every key/value pair in ``value`` to the properties file at ``file_path``."""
    for prop_name, prop_value in value.items():
        modify_config(file_path, prop_name, prop_value)
+
+
def modify_config(filepath, variable, setting):
    """Set ``variable=setting`` in the properties file at ``filepath``, in place.

    Only the first non-comment line whose key (text before the first '=')
    equals ``variable`` is rewritten, and only when its current value differs
    from ``setting``.  Comment lines (leading '#') and lines without '=' pass
    through untouched.  If the key is absent, ``variable=setting`` is appended
    to the end of the file.

    Fixes over the previous revision:
      * dropped the no-op "quote if setting has spaces" branch
        (``S = '%s' % S`` never added quotes) and the dead trailing
        ``elif/else: pass`` chain;
      * the current value is compared against everything after the FIRST '=',
        so values that themselves contain '=' are recognized as already set.
    """
    target_key = str(variable)
    target_value = str(setting)
    var_found = False

    for line in fileinput.input(filepath, inplace=1):
        # Only lines shaped like key=value are candidates; comments pass through.
        if not line.lstrip(' ').startswith('#') and '=' in line:
            key_part, _, value_part = line.partition('=')
            # Only the first matching occurrence is changed.
            if not var_found and key_part.rstrip(' ') == target_key:
                var_found = True
                # Leave the line alone if it already holds the desired value.
                if value_part.lstrip(' ').rstrip() != target_value:
                    line = "%s=%s\n" % (target_key, target_value)
        # With inplace=1, stdout is redirected into the file being rewritten.
        sys.stdout.write(line)

    # Append the pair if the key was never seen.
    if not var_found:
        with open(filepath, "a") as f:
            f.write("%s=%s\n" % (target_key, target_value))
+
def ranger_hdfs_properties(params):
    """Collect the install.properties key/value pairs for the Ranger HDFS plugin.

    Values come from the 'admin-properties' and 'ranger-hdfs-plugin-properties'
    config types; REPOSITORY_NAME is derived from the cluster name.  Returns a
    plain dict keyed by install.properties property names.
    """
    admin_conf = params.config['configurations']['admin-properties']
    plugin_conf = params.config['configurations']['ranger-hdfs-plugin-properties']

    properties = {
        'POLICY_MGR_URL': admin_conf['policymgr_external_url'],
        'SQL_CONNECTOR_JAR': admin_conf['SQL_CONNECTOR_JAR'],
        'XAAUDIT.DB.FLAVOUR': admin_conf['DB_FLAVOR'],
        'XAAUDIT.DB.DATABASE_NAME': admin_conf['audit_db_name'],
        'XAAUDIT.DB.USER_NAME': admin_conf['audit_db_user'],
        'XAAUDIT.DB.PASSWORD': admin_conf['audit_db_password'],
        'XAAUDIT.DB.HOSTNAME': admin_conf['db_host'],
        'REPOSITORY_NAME': params.config['clusterName'] + '_hadoop',
    }

    # Copied verbatim from the plugin config type, in the original order.
    # ('DESTINTATION' spelling is the upstream property name -- do not "fix".)
    for name in ('XAAUDIT.DB.IS_ENABLED',
                 'XAAUDIT.HDFS.IS_ENABLED',
                 'XAAUDIT.HDFS.DESTINATION_DIRECTORY',
                 'XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY',
                 'XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY',
                 'XAAUDIT.HDFS.DESTINTATION_FILE',
                 'XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS',
                 'XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS',
                 'XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS',
                 'XAAUDIT.HDFS.LOCAL_BUFFER_FILE',
                 'XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS',
                 'XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS',
                 'XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT',
                 'SSL_KEYSTORE_FILE_PATH',
                 'SSL_KEYSTORE_PASSWORD',
                 'SSL_TRUSTSTORE_FILE_PATH',
                 'SSL_TRUSTSTORE_PASSWORD'):
        properties[name] = plugin_conf[name]

    return properties
+
+
def hdfs_repo_properties(params):
    """Build the JSON payload that registers this cluster's HDFS repository in Ranger admin.

    Returns a JSON string whose 'config' field is itself a JSON-encoded dict
    of HDFS connection settings pulled from core-site.
    """
    plugin_conf = params.config['configurations']['ranger-hdfs-plugin-properties']
    core_site = params.config['configurations']['core-site']

    connection_config = {
        'username': plugin_conf['REPOSITORY_CONFIG_USERNAME'],
        'password': plugin_conf['REPOSITORY_CONFIG_PASSWORD'],
        'hadoop.security.authentication': core_site['hadoop.security.authentication'],
        'hadoop.security.authorization': core_site['hadoop.security.authorization'],
        'fs.default.name': core_site['fs.defaultFS'],
        'hadoop.security.auth_to_local': core_site['hadoop.security.auth_to_local'],
    }
    # RPC protection, kerberos principals and the certificate CN are sent blank.
    for blank_key in ('hadoop.rpc.protection',
                      'dfs.datanode.kerberos.principal',
                      'dfs.namenode.kerberos.principal',
                      'dfs.secondary.namenode.kerberos.principal',
                      'commonNameForCertificate'):
        connection_config[blank_key] = ''

    repo_definition = {
        'isActive': "true",
        'config': json.dumps(connection_config),
        'description': "hdfs repo",
        'name': params.config['clusterName'] + "_hadoop",
        'repositoryType': "Hdfs",
        'assetType': '1',
    }
    return json.dumps(repo_definition)

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/metainfo.xml
index db48936..da662ce 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/metainfo.xml
@@ -288,6 +288,7 @@
         <config-type>hive-env</config-type>
         <config-type>webhcat-site</config-type>
         <config-type>webhcat-env</config-type>
+        <config-type>ranger-hive-plugin-properties</config-type>
       </configuration-dependencies>
     </service>
   </services>

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server.py
index 055916b..e40f8c0 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server.py
@@ -27,6 +27,7 @@ from resource_management.libraries.functions.security_commons import build_expec
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
   FILE_TYPE_XML
 from install_jars import install_tez_jars
+from setup_ranger_hive import setup_ranger_hive
 
 class HiveServer(Script):
 
@@ -36,7 +37,7 @@ class HiveServer(Script):
   def install(self, env):
     import params
     self.install_packages(env, exclude_packages=params.hive_exclude_packages)
-
+    setup_ranger_hive(env)    
 
   def configure(self, env):
     import params
@@ -55,7 +56,7 @@ class HiveServer(Script):
     # This function is needed in HDP 2.2, but it is safe to call in earlier versions.
     copy_tarballs_to_hdfs('mapreduce', params.tez_user, params.hdfs_user, params.user_group)
     copy_tarballs_to_hdfs('tez', params.tez_user, params.hdfs_user, params.user_group)
-
+    setup_ranger_hive(env)    
     hive_service( 'hiveserver2', action = 'start',
       rolling_restart=rolling_restart )
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params.py
index a027d52..ada3237 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params.py
@@ -315,3 +315,15 @@ HdfsDirectory = functools.partial(
   kinit_path_local = kinit_path_local,
   bin_dir = hadoop_bin_dir
 )
+
# ranger host
# Hosts running the Ranger admin component; empty list when Ranger is not in
# the cluster.
ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
has_ranger_admin = not len(ranger_admin_hosts) == 0
# The Ranger Hive plugin config type only exists on HDP 2.2+.
# NOTE(review): enable_ranger_hive stays undefined on older stacks, and any
# value other than 'yes'/'no' leaves it False -- confirm consumers guard on
# the stack version.  Also note the file is missing its trailing newline.
if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >=0:
    # setting flag value for ranger hive plugin
    enable_ranger_hive = False
    user_input = config['configurations']['ranger-hive-plugin-properties']['ranger-hive-plugin-enabled']
    if  user_input.lower() == 'yes':
      enable_ranger_hive = True
    elif user_input.lower() == 'no':
      enable_ranger_hive = False
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_ranger_hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_ranger_hive.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_ranger_hive.py
new file mode 100644
index 0000000..6a4dd65
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_ranger_hive.py
@@ -0,0 +1,182 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import sys
+import fileinput
+import subprocess
+import json
+import re
+from resource_management import *
+from resource_management.libraries.functions.ranger_admin import Rangeradmin
+from resource_management.core.logger import Logger
+
def setup_ranger_hive(env):
    """Wire up the Ranger Hive plugin on this host, if a Ranger admin exists.

    Steps: resolve the active HDP version via ``hdp-select``, rewrite the
    plugin's install.properties from cluster configuration, ensure the
    cluster's Hive repository exists in Ranger admin, then run the
    enable- or disable-plugin script per ``params.enable_ranger_hive``.

    Raises:
        Fail: when hdp-select cannot be executed, returns non-zero, or its
              output does not contain a parseable version.
    """
    import params
    env.set_params(params)

    if not params.has_ranger_admin:
        Logger.info('Ranger admin not installed')
        return

    try:
        return_code, hdp_output = shell.call('hdp-select status hive-server2', timeout=20)
    except Exception as e:
        Logger.error(str(e))
        raise Fail('Unable to execute hdp-select command to retrieve the version.')

    if return_code != 0:
        raise Fail('Unable to determine the current version because of a non-zero return code of {0}'.format(str(return_code)))

    hdp_version = re.sub('hive-server2 - ', '', hdp_output)
    # Dots are escaped now; the previous pattern's bare '.' matched any character.
    match = re.match(r'[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+-[0-9]+', hdp_version)

    if match is None:
        raise Fail('Failed to get extracted version')

    file_path = '/usr/hdp/' + hdp_version + '/ranger-hive-plugin/install.properties'

    ranger_hive_dict = ranger_hive_properties(params)
    hive_repo_data = hive_repo_properties(params)

    write_properties_to_file(file_path, ranger_hive_dict)

    if params.enable_ranger_hive:
        cmd = format('cd /usr/hdp/{hdp_version}/ranger-hive-plugin/ && sh enable-hive-plugin.sh')
        ranger_adm_obj = Rangeradmin(url=ranger_hive_dict['POLICY_MGR_URL'])
        # NOTE(review): probe/admin credentials are hard-coded ('test:test',
        # 'admin:admin'); they should come from configuration. Flagged only.
        response_code, response_received = ranger_adm_obj.call_pycurl_request(
            ranger_hive_dict['POLICY_MGR_URL'] + '/login.jsp', '', 'get', 'test:test')

        if response_code is not None and response_code == 200:
            repo = ranger_adm_obj.get_repository_by_name_pycurl(
                ranger_hive_dict['REPOSITORY_NAME'], 'hive', 'true', 'admin:admin')

            if repo and repo['name'] == ranger_hive_dict['REPOSITORY_NAME']:
                Logger.info('Hive Repository exist')
            else:
                response = ranger_adm_obj.create_repository_pycurl(hive_repo_data, 'admin:admin')
                if response is not None:
                    Logger.info('Hive Repository created')
                else:
                    Logger.info('Hive Repository creation failed')
        else:
            Logger.info('Ranger service is not started on given host')
    else:
        cmd = format('cd /usr/hdp/{hdp_version}/ranger-hive-plugin/ && sh disable-hive-plugin.sh')

    Execute(cmd, environment={'JAVA_HOME': params.java64_home}, logoutput=True)
+
+
def write_properties_to_file(file_path, value):
    """Apply every key/value pair in ``value`` to the properties file at ``file_path``."""
    for prop_name, prop_value in value.items():
        modify_config(file_path, prop_name, prop_value)
+
+
def modify_config(filepath, variable, setting):
    """Set ``variable=setting`` in the properties file at ``filepath``, in place.

    Only the first non-comment line whose key (text before the first '=')
    equals ``variable`` is rewritten, and only when its current value differs
    from ``setting``.  Comment lines (leading '#') and lines without '=' pass
    through untouched.  If the key is absent, ``variable=setting`` is appended
    to the end of the file.

    Fixes over the previous revision:
      * dropped the no-op "quote if setting has spaces" branch
        (``S = '%s' % S`` never added quotes) and the dead trailing
        ``elif/else: pass`` chain;
      * the current value is compared against everything after the FIRST '=',
        so values that themselves contain '=' are recognized as already set.
    """
    target_key = str(variable)
    target_value = str(setting)
    var_found = False

    for line in fileinput.input(filepath, inplace=1):
        # Only lines shaped like key=value are candidates; comments pass through.
        if not line.lstrip(' ').startswith('#') and '=' in line:
            key_part, _, value_part = line.partition('=')
            # Only the first matching occurrence is changed.
            if not var_found and key_part.rstrip(' ') == target_key:
                var_found = True
                # Leave the line alone if it already holds the desired value.
                if value_part.lstrip(' ').rstrip() != target_value:
                    line = "%s=%s\n" % (target_key, target_value)
        # With inplace=1, stdout is redirected into the file being rewritten.
        sys.stdout.write(line)

    # Append the pair if the key was never seen.
    if not var_found:
        with open(filepath, "a") as f:
            f.write("%s=%s\n" % (target_key, target_value))
+
def ranger_hive_properties(params):
    """Collect the install.properties key/value pairs for the Ranger Hive plugin.

    Values come from the 'admin-properties' and 'ranger-hive-plugin-properties'
    config types; REPOSITORY_NAME is derived from the cluster name.  Returns a
    plain dict keyed by install.properties property names.
    """
    admin_conf = params.config['configurations']['admin-properties']
    plugin_conf = params.config['configurations']['ranger-hive-plugin-properties']

    properties = {
        'POLICY_MGR_URL': admin_conf['policymgr_external_url'],
        'SQL_CONNECTOR_JAR': admin_conf['SQL_CONNECTOR_JAR'],
        'XAAUDIT.DB.FLAVOUR': admin_conf['DB_FLAVOR'],
        'XAAUDIT.DB.DATABASE_NAME': admin_conf['audit_db_name'],
        'XAAUDIT.DB.USER_NAME': admin_conf['audit_db_user'],
        'XAAUDIT.DB.PASSWORD': admin_conf['audit_db_password'],
        'XAAUDIT.DB.HOSTNAME': admin_conf['db_host'],
        'REPOSITORY_NAME': params.config['clusterName'] + '_hive',
    }

    # Copied verbatim from the plugin config type, in the original order.
    # ('DESTINTATION' spelling is the upstream property name -- do not "fix".)
    for name in ('XAAUDIT.DB.IS_ENABLED',
                 'XAAUDIT.HDFS.IS_ENABLED',
                 'XAAUDIT.HDFS.DESTINATION_DIRECTORY',
                 'XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY',
                 'XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY',
                 'XAAUDIT.HDFS.DESTINTATION_FILE',
                 'XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS',
                 'XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS',
                 'XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS',
                 'XAAUDIT.HDFS.LOCAL_BUFFER_FILE',
                 'XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS',
                 'XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS',
                 'XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT',
                 'SSL_KEYSTORE_FILE_PATH',
                 'SSL_KEYSTORE_PASSWORD',
                 'SSL_TRUSTSTORE_FILE_PATH',
                 'SSL_TRUSTSTORE_PASSWORD',
                 'UPDATE_XAPOLICIES_ON_GRANT_REVOKE'):
        properties[name] = plugin_conf[name]

    return properties
+
def hive_repo_properties(params):
    """Build the JSON payload that registers this cluster's Hive repository in Ranger admin.

    Returns a JSON string whose 'config' field is itself a JSON-encoded dict
    of JDBC connection settings pointing at the first HiveServer2 host.
    """
    plugin_conf = params.config['configurations']['ranger-hive-plugin-properties']
    hive_host = params.config['clusterHostInfo']['hive_server_host'][0]

    connection_config = {
        'username': plugin_conf['REPOSITORY_CONFIG_USERNAME'],
        'password': plugin_conf['REPOSITORY_CONFIG_PASSWORD'],
        'jdbc.driverClassName': 'org.apache.hive.jdbc.HiveDriver',
        # HiveServer2 binary-transport default port
        'jdbc.url': 'jdbc:hive2://' + hive_host + ':10000',
        'commonNameForCertificate': '',
    }

    repo_definition = {
        'isActive': "true",
        'config': json.dumps(connection_config),
        'description': "hive repo",
        'name': params.config['clusterName'] + '_hive',
        'repositoryType': "Hive",
        'assetType': '3',
    }
    return json.dumps(repo_definition)

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/ranger-knox-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/ranger-knox-plugin-properties.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/ranger-knox-plugin-properties.xml
new file mode 100644
index 0000000..b4f8ce6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/configuration/ranger-knox-plugin-properties.xml
@@ -0,0 +1,157 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+
+        <property>
+                <name>ranger-knox-plugin-enabled</name>
+                <value>Yes</value>
+                <description>Enable ranger knox plugin ?</description>
+        </property>
+
+	<property>
+		<name>REPOSITORY_CONFIG_USERNAME</name>
+		<value>admin</value>
+		<description></description>
+	</property>	
+
+	<property>
+		<name>REPOSITORY_CONFIG_PASSWORD</name>
+		<value>admin-password</value>
+		<property-type>PASSWORD</property-type>
+		<description></description>
+	</property>	
+
+	<property>
+		<name>KNOX_HOME</name>
+		<value>/usr/hdp/current/knox-server</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.DB.IS_ENABLED</name>
+		<value>true</value>
+		<description></description>
+	</property>
+
+
+	<property>
+		<name>XAAUDIT.DB.HOSTNAME</name>
+		<value>localhost</value>
+		<description></description>
+	</property>	
+
+	<property>
+		<name>XAAUDIT.HDFS.IS_ENABLED</name>
+		<value>false</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINATION_DIRECTORY</name>
+		<value>hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY</name>
+		<value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY</name>
+		<value>__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_FILE</name>
+		<value>%hostname%-audit.log</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS</name>
+		<value>900</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS</name>
+		<value>86400</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS</name>
+		<value>60</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_FILE</name>
+		<value>%time:yyyyMMdd-HHmm.ss%.log</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS</name>
+		<value>60</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS</name>
+		<value>600</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT</name>
+		<value>10</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_KEYSTORE_FILE_PATH</name>
+		<value>/etc/hadoop/conf/ranger-plugin-keystore.jks</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_KEYSTORE_PASSWORD</name>
+		<value>myKeyFilePassword</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_TRUSTSTORE_FILE_PATH</name>
+		<value>/etc/hadoop/conf/ranger-plugin-truststore.jks</value>
+		<description></description>
+	</property>
+
+	<property>
+		<name>SSL_TRUSTSTORE_PASSWORD</name>
+		<value>changeit</value>
+		<description></description>
+	</property>
+
+</configuration>	

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/metainfo.xml b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/metainfo.xml
index ce4448f..810d3ff 100644
--- a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/metainfo.xml
@@ -81,6 +81,7 @@
         <config-type>gateway-site</config-type>
         <config-type>gateway-log4j</config-type>
         <config-type>topology</config-type>
+        <config-type>ranger-knox-plugin-properties</config-type>
       </configuration-dependencies>
     </service>
   </services>

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/knox_gateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/knox_gateway.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/knox_gateway.py
index 660a630..b447003 100644
--- a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/knox_gateway.py
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/knox_gateway.py
@@ -25,6 +25,7 @@ import sys
 
 from knox import knox
 from ldap import ldap
+from setup_ranger_knox import setup_ranger_knox
 
 class KnoxGateway(Script):
 
@@ -39,6 +40,7 @@ class KnoxGateway(Script):
     File(format('{knox_conf_dir}/topologies/sandbox.xml'),
          action = "delete",
     )
+    setup_ranger_knox(env)
 
   def configure(self, env):
     import params
@@ -52,6 +54,7 @@ class KnoxGateway(Script):
     self.configure(env)
     daemon_cmd = format('{knox_bin} start')
     no_op_test = format('ls {knox_pid_file} >/dev/null 2>&1 && ps -p `cat {knox_pid_file}` >/dev/null 2>&1')
+    setup_ranger_knox(env)
     Execute(daemon_cmd,
             user=params.knox_user,
             environment={'JAVA_HOME': params.java_home},

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params.py
index 3c91992..768d94d 100644
--- a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params.py
@@ -139,4 +139,15 @@ if security_enabled:
   _hostname_lowercase = config['hostname'].lower()
   knox_principal_name = config['configurations']['knox-env']['knox_principal_name'].replace('_HOST',_hostname_lowercase)
 
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = not len(ranger_admin_hosts) == 0
 
+if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+    # Setting flag value for the Ranger Knox plugin
+    enable_ranger_knox = False
+    user_input = config['configurations']['ranger-knox-plugin-properties']['ranger-knox-plugin-enabled']
+    if user_input.lower() == 'yes':
+      enable_ranger_knox = True
+    elif user_input.lower() == 'no':
+      enable_ranger_knox = False

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/setup_ranger_knox.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/setup_ranger_knox.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/setup_ranger_knox.py
new file mode 100644
index 0000000..0e90c48
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/setup_ranger_knox.py
@@ -0,0 +1,184 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import sys
+import fileinput
+import subprocess
+import json
+import re
+from resource_management import *
+from resource_management.libraries.functions.ranger_admin import Rangeradmin
+from resource_management.core.logger import Logger
+
def setup_ranger_knox(env):
    """Configure the Ranger Knox plugin on this host.

    Resolves the installed HDP stack version via ``hdp-select``, writes the
    plugin's install.properties, and — when the plugin is enabled — runs
    enable-knox-plugin.sh and registers a Knox repository in Ranger Admin.
    Raises Fail when the stack version cannot be determined.
    """
    import params
    env.set_params(params)

    # Ask hdp-select for the currently active knox-server version string.
    try:
        command = 'hdp-select status knox-server'
        return_code, hdp_output = shell.call(command, timeout=20)
    except Exception, e:
        Logger.error(str(e))
        raise Fail('Unable to execute hdp-select command to retrieve the version.')

    if return_code != 0:
        raise Fail('Unable to determine the current version because of a non-zero return code of {0}'.format(str(return_code)))

    # Output looks like "knox-server - <a.b.c.d-build>"; strip the prefix and
    # validate that what remains is a well-formed stack version.
    hdp_version = re.sub('knox-server - ', '', hdp_output)
    match = re.match('[0-9]+.[0-9]+.[0-9]+.[0-9]+-[0-9]+', hdp_version)

    if match is None:
        raise Fail('Failed to get extracted version')

    file_path = '/usr/hdp/'+ hdp_version +'/ranger-knox-plugin/install.properties'

    if params.has_ranger_admin:

        ranger_knox_dict = ranger_knox_properties(params)
        knox_repo_data = knox_repo_properties(params)

        # Persist the computed properties into the plugin's install.properties.
        write_properties_to_file(file_path, ranger_knox_dict)

        if params.enable_ranger_knox:
            cmd = format('cd /usr/hdp/{hdp_version}/ranger-knox-plugin/ && sh enable-knox-plugin.sh')
            # Probe Ranger Admin's login page to check the service is up.
            # NOTE(review): credentials appear hard-coded here ('test:test' for
            # the probe, 'admin:admin' for repo calls) — confirm intended.
            ranger_adm_obj = Rangeradmin(url=ranger_knox_dict['POLICY_MGR_URL'])
            response_code, response_recieved = ranger_adm_obj.call_pycurl_request(ranger_knox_dict['POLICY_MGR_URL'] + '/login.jsp', '', 'get', 'test:test')

            if response_code is not None and response_code == 200:
                # Only create the Knox repository if it does not already exist.
                repo = ranger_adm_obj.get_repository_by_name_pycurl(ranger_knox_dict['REPOSITORY_NAME'], 'knox', 'true', 'admin:admin')

                if repo and repo['name'] == ranger_knox_dict['REPOSITORY_NAME']:
                    Logger.info('Knox Repository exist')
                else:
                    response = ranger_adm_obj.create_repository_pycurl(knox_repo_data, 'admin:admin')
                    if response is not None:
                        Logger.info('Knox Repository created')
                    else:
                        Logger.info('Knox Repository creation failed')
            else:
                Logger.info('Ranger service is not started on given host')
        else:
            cmd = format('cd /usr/hdp/{hdp_version}/ranger-knox-plugin/ && sh disable-knox-plugin.sh')

        # Run the enable/disable script with the cluster's JAVA_HOME.
        Execute(cmd, environment={'JAVA_HOME': params.java_home}, logoutput=True)
    else:
        Logger.info('Ranger admin not installed')
+
+
def write_properties_to_file(file_path, value):
    """Apply every key/value pair in *value* to *file_path* via modify_config."""
    for prop_name, prop_value in value.items():
        modify_config(file_path, prop_name, prop_value)
+
+
def modify_config(filepath, variable, setting):
    """Set ``variable=setting`` in *filepath*, editing the file in place.

    Only the first non-comment line assigning *variable* is rewritten, and
    only when its current value differs; if the variable is not present at
    all, the assignment is appended to the end of the file.

    (The original version carried a no-op "quote if the value has spaces"
    branch — ``S = '%s' % S`` changes nothing — and dead ``already_set``
    bookkeeping ending in ``pass``; both are removed here with no change
    in behavior.)
    """
    var_found = False
    target_var = str(variable)
    target_val = str(setting)

    # fileinput with inplace=1 redirects sys.stdout into the file, so every
    # line must be written back, modified or not.
    for line in fileinput.input(filepath, inplace=1):
        # Only process lines that look like config assignments.
        if not var_found and not line.lstrip(' ').startswith('#') and '=' in line:
            infile_var = str(line.split('=')[0].rstrip(' '))
            infile_val = str(line.split('=')[1].lstrip(' ').rstrip())
            # Only the first matching occurrence is changed.
            if infile_var == target_var:
                var_found = True
                # Leave the line untouched if it is already set correctly.
                if infile_val != target_val:
                    line = "%s=%s\n" % (target_var, target_val)

        sys.stdout.write(line)

    # Append the assignment if the variable was never found.
    if not var_found:
        with open(filepath, "a") as f:
            f.write("%s=%s\n" % (target_var, target_val))

    return
+
def ranger_knox_properties(params):
    """Build the install.properties key/value map for the Ranger Knox plugin.

    Values come from two config sections: connection/audit-DB settings from
    'admin-properties', everything else copied verbatim from
    'ranger-knox-plugin-properties'. REPOSITORY_NAME is derived from the
    cluster name.
    """
    admin_props = params.config['configurations']['admin-properties']
    plugin_props = params.config['configurations']['ranger-knox-plugin-properties']

    properties = {
        'POLICY_MGR_URL': admin_props['policymgr_external_url'],
        'SQL_CONNECTOR_JAR': admin_props['SQL_CONNECTOR_JAR'],
        'XAAUDIT.DB.FLAVOUR': admin_props['DB_FLAVOR'],
        'XAAUDIT.DB.DATABASE_NAME': admin_props['audit_db_name'],
        'XAAUDIT.DB.USER_NAME': admin_props['audit_db_user'],
        'XAAUDIT.DB.PASSWORD': admin_props['audit_db_password'],
        'XAAUDIT.DB.HOSTNAME': admin_props['db_host'],
        'REPOSITORY_NAME': params.config['clusterName'] + '_knox',
    }

    # The remaining entries are copied one-to-one from the plugin section.
    # Note: the 'DESTINTATION' spellings are the plugin's actual property
    # names and must not be "corrected" here.
    for prop_name in (
            'KNOX_HOME',
            'XAAUDIT.DB.IS_ENABLED',
            'XAAUDIT.HDFS.IS_ENABLED',
            'XAAUDIT.HDFS.DESTINATION_DIRECTORY',
            'XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY',
            'XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY',
            'XAAUDIT.HDFS.DESTINTATION_FILE',
            'XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS',
            'XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS',
            'XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS',
            'XAAUDIT.HDFS.LOCAL_BUFFER_FILE',
            'XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS',
            'XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS',
            'XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT',
            'SSL_KEYSTORE_FILE_PATH',
            'SSL_KEYSTORE_PASSWORD',
            'SSL_TRUSTSTORE_FILE_PATH',
            'SSL_TRUSTSTORE_PASSWORD'):
        properties[prop_name] = plugin_props[prop_name]

    return properties
+
def knox_repo_properties(params):
    """Build the JSON payload used to register the Knox repository in Ranger.

    Returns a JSON string describing the repository (name, type, and an
    embedded JSON 'config' blob with the gateway admin URL and credentials).

    BUG FIX: the original read REPOSITORY_CONFIG_USERNAME for *both* the
    username and the password, so the repository was registered with the
    username as its password; the password now comes from
    REPOSITORY_CONFIG_PASSWORD.
    """
    knox_host = params.config['clusterHostInfo']['knox_gateway_hosts'][0]
    knox_port = params.config['configurations']['gateway-site']['gateway.port']
    plugin_props = params.config['configurations']['ranger-knox-plugin-properties']

    config_dict = dict()
    config_dict['username'] = plugin_props['REPOSITORY_CONFIG_USERNAME']
    config_dict['password'] = plugin_props['REPOSITORY_CONFIG_PASSWORD']
    config_dict['knox.url'] = 'https://' + knox_host + ':' + str(knox_port) + '/gateway/admin/api/v1/topologies'
    config_dict['commonNameForCertificate'] = ''

    repo = dict()
    repo['isActive'] = "true"
    # Ranger expects the repo config itself as a JSON-encoded string field.
    repo['config'] = json.dumps(config_dict)
    repo['description'] = "knox repo"
    repo['name'] = params.config['clusterName'] + "_knox"
    repo['repositoryType'] = "Knox"
    repo['assetType'] = '5'

    return json.dumps(repo)

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/admin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/admin-properties.xml b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/admin-properties.xml
new file mode 100644
index 0000000..687ea15
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/admin-properties.xml
@@ -0,0 +1,180 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false">
+
+	<property>
+		<name>DB_FLAVOR</name>
+		<value>MYSQL</value>
+		<description>The database type to be used (mysql/oracle)</description>
+	</property>
+
+        <property>
+                <name>SQL_COMMAND_INVOKER</name>
+                <value>mysql</value>
+                <description>The executable path to be used to invoke command-line MYSQL</description>
+        </property>
+
+        <property>
+                <name>SQL_CONNECTOR_JAR</name>
+                <value>/usr/share/java/mysql-connector-java.jar</value>
+                <description>Location of DB client library (please check the location of the jar file)</description>
+        </property>
+
+        <property>
+                <name>db_root_user</name>
+                <value>root</value>
+                <property-type>USER</property-type>
+                <description>Database admin user</description>
+        </property>
+
+        <property require-input="true">
+                <name>db_root_password</name>
+                <value>vagrant</value>
+                <property-type>PASSWORD</property-type>
+                <description>Database password for the database admin user-id</description>
+        </property>
+
+        <property>
+                <name>db_host</name>
+                <value>localhost</value>
+                <description>Database host</description>
+        </property>
+
+        <property>
+                <name>db_name</name>
+                <value>ranger</value>
+                <description>Database name</description>
+        </property>
+
+        <property>
+                <name>db_user</name>
+                <value>rangeradmin</value>
+                <property-type>USER</property-type>
+                <description>Database user-id used for the XASecure schema</description>
+        </property>
+
+        <property require-input="true">
+                <name>db_password</name>
+                <value>rangeradmin</value>
+                <property-type>PASSWORD</property-type>
+                <description>Database password for the XASecure schema</description>
+        </property>
+
+        <property>
+                <name>audit_db_name</name>
+                <value>ranger_audit</value>
+                <description>Audit database name</description>
+        </property>
+
+        <property>
+                <name>audit_db_user</name>
+                <value>rangerlogger</value>
+                <property-type>USER</property-type>
+                <description>Database user-id for storing auditlog information</description>
+        </property>
+
+        <property require-input="true">
+                <name>audit_db_password</name>
+                <value>rangerlogger</value>
+                <property-type>PASSWORD</property-type>
+                <description>Database password for storing auditlog information</description>
+        </property>
+
+        <property>
+                <name>policymgr_external_url</name>
+                <value>http://localhost:6080</value>
+                <description>Policy Manager external url</description>
+        </property>
+
+        <property>
+                <name>policymgr_http_enabled</name>
+                <value>true</value>
+                <description>HTTP Enabled</description>
+        </property>
+
+        <property>
+                <name>authentication_method</name>
+                <value>UNIX</value>
+                <description></description>
+        </property>
+
+        <property>
+                <name>remoteLoginEnabled</name>
+                <value>true</value>
+                <description></description>
+        </property>
+
+        <property>
+                <name>authServiceHostName</name>
+                <value>localhost</value>
+                <description></description>
+        </property>
+
+        <property>
+                <name>authServicePort</name>
+                <value>5151</value>
+                <description></description>
+        </property>
+
+        <property>
+                <name>xa_ldap_url</name>
+                <value>"ldap://71.127.43.33:389"</value>
+                <description></description>
+        </property>
+
+        <property>
+                <name>xa_ldap_userDNpattern</name>
+                <value>"uid={0},ou=users,dc=xasecure,dc=net"</value>
+                <description></description>
+        </property>
+
+        <property>
+                <name>xa_ldap_groupSearchBase</name>
+                <value>"ou=groups,dc=xasecure,dc=net"</value>
+                <description></description>
+        </property>
+
+        <property>
+                <name>xa_ldap_groupSearchFilter</name>
+                <value>"(member=uid={0},ou=users,dc=xasecure,dc=net)"</value>
+                <description></description>
+        </property>
+
+        <property>
+                <name>xa_ldap_groupRoleAttribute</name>
+                <value>"cn"</value>
+                <description></description>
+        </property>
+
+        <property>
+                <name>xa_ldap_ad_domain</name>
+                <value>"xasecure.net"</value>
+                <description></description>
+        </property>
+
+        <property>
+                <name>xa_ldap_ad_url</name>
+                <value>"ldap://ad.xasecure.net:389"</value>
+                <description></description>
+        </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/ranger-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/ranger-env.xml b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/ranger-env.xml
new file mode 100644
index 0000000..1d8865a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/ranger-env.xml
@@ -0,0 +1,49 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+
+    <property>
+        <name>unix_user</name>
+        <value>ranger</value>
+        <property-type>USER</property-type>
+        <description>Unix username</description>
+    </property>
+
+    <property>
+        <name>unix_group</name>
+        <value>ranger</value>
+        <property-type>GROUP</property-type>
+        <description>Unix group</description>
+    </property>
+
+    <property>
+        <name>ranger_admin_log_dir</name>
+        <value>/var/log/ranger/admin</value>
+        <description></description>
+    </property>
+
+    <property>
+        <name>ranger_usersync_log_dir</name>
+        <value>/var/log/ranger/usersync</value>
+        <description></description>
+    </property>    
+
+</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/usersync-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/usersync-properties.xml b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/usersync-properties.xml
new file mode 100644
index 0000000..67d1846
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/configuration/usersync-properties.xml
@@ -0,0 +1,103 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false">
+	<property>
+		<name>SYNC_SOURCE</name>
+		<value>unix</value>
+		<description></description>
+	</property>
+	<property>
+		<name>MIN_UNIX_USER_ID_TO_SYNC</name>
+		<value>1000</value>
+		<description></description>
+	</property>
+	<property>
+		<name>SYNC_INTERVAL</name>
+		<value>1</value>
+		<description></description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_URL</name>
+		<value>ldap://localhost:389</value>
+		<description>a sample value would be:  ldap://ldap.example.com:389</description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_BIND_DN</name>
+		<value>cn=admin,dc=xasecure,dc=net</value>
+		<description>a sample value would be cn=admin,ou=users,dc=hadoop,dc=apache,dc-org</description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_BIND_PASSWORD</name>
+		<value>admin321</value>
+		<description></description>
+	</property>
+	<property>
+		<name>CRED_KEYSTORE_FILENAME</name>
+		<value>/usr/lib/xausersync/.jceks/xausersync.jceks</value>
+		<description></description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_USER_SEARCH_BASE</name>
+		<value>ou=users,dc=xasecure,dc=net</value>
+		<description>sample value would be ou=users,dc=hadoop,dc=apache,dc=org</description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_USER_SEARCH_SCOPE</name>
+		<value>sub</value>
+		<description>default value: sub</description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_USER_OBJECT_CLASS</name>
+		<value>person</value>
+		<description>default value: person</description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_USER_SEARCH_FILTER</name>
+		<value></value>
+		<description>default value is empty</description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_USER_NAME_ATTRIBUTE</name>
+		<value>cn</value>
+		<description>default value: cn</description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_USER_GROUP_NAME_ATTRIBUTE</name>
+		<value>memberof,ismemberof</value>
+		<description></description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_USERNAME_CASE_CONVERSION</name>
+		<value>lower</value>
+		<description>possible values:  none, lower, upper</description>
+	</property>
+	<property>
+		<name>SYNC_LDAP_GROUPNAME_CASE_CONVERSION</name>
+		<value>lower</value>
+		<description>possible values:  none, lower, upper</description>
+	</property>
+	<property>
+		<name>logdir</name>
+		<value>logs</value>
+		<description>user sync log path</description>
+	</property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/31cdf9fa/ambari-server/src/main/resources/common-services/RANGER/0.4.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/metainfo.xml b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/metainfo.xml
new file mode 100644
index 0000000..9be04cf
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/metainfo.xml
@@ -0,0 +1,78 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+            <name>RANGER</name>
+            <displayName>Ranger</displayName>
+            <comment>Comprehensive security for Hadoop</comment>
+            <version>0.4.0</version>
+            <components>
+                
+                <component>
+                    <name>RANGER_ADMIN</name>
+                    <displayName>Ranger Admin</displayName>
+                    <category>MASTER</category>
+                    <cardinality>1</cardinality>
+                    <commandScript>
+                        <script>scripts/ranger_admin.py</script>
+                        <scriptType>PYTHON</scriptType>
+                        <timeout>600</timeout>
+                    </commandScript>
+                </component>
+
+                <component>
+                    <name>RANGER_USERSYNC</name>
+                    <displayName>Ranger Usersync</displayName>
+                    <category>MASTER</category>
+                    <cardinality>1</cardinality>
+                    <commandScript>
+                        <script>scripts/ranger_usersync.py</script>
+                        <scriptType>PYTHON</scriptType>
+                        <timeout>600</timeout>
+                    </commandScript>           
+                </component>
+
+            </components>              
+
+            <osSpecifics>
+                <osSpecific>
+                    <osFamily>redhat5,redhat6,suse11,ubuntu12</osFamily>
+                    <packages>
+                        <package>
+                            <name>ranger-admin</name>                                
+                        </package>
+                        <package>
+                            <name>ranger-usersync</name>
+                        </package>                           
+                    </packages>                        
+                </osSpecific>
+            </osSpecifics>
+
+            <configuration-dependencies>
+                <config-type>admin-properties</config-type>
+                <config-type>usersync-properties</config-type>
+            </configuration-dependencies>						
+
+        </service>
+    </services>
+</metainfo>