Posted to commits@ambari.apache.org by ao...@apache.org on 2014/01/31 20:50:54 UTC

[29/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/functions.py
new file mode 100644
index 0000000..e6e7fb9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/functions.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import re
+import math
+import datetime
+
+from resource_management.core.shell import checked_call
+
+def calc_xmn_from_xms(heapsize_str, xmn_percent, xmn_max):
+  """
+  @param heapsize_str: str (e.g '1000m')
+  @param xmn_percent: float (e.g 0.2)
+  @param xmn_max: integer (e.g 512)
+  """
+  heapsize = int(re.search('\d+',heapsize_str).group(0))
+  heapsize_unit = re.search('\D+',heapsize_str).group(0)
+  xmn_val = int(math.floor(heapsize*xmn_percent))
+  xmn_val -= xmn_val % 8
+  
+  result_xmn_val = xmn_max if xmn_val > xmn_max else xmn_val
+  return str(result_xmn_val) + heapsize_unit
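
For reference, a quick sanity check of calc_xmn_from_xms above (heap sizes hypothetical):

    >>> calc_xmn_from_xms('1024m', 0.2, 512)
    '200m'    # floor(1024 * 0.2) = 204, rounded down to a multiple of 8
    >>> calc_xmn_from_xms('4096m', 0.2, 512)
    '512m'    # floor(4096 * 0.2) = 819 -> 816, capped at xmn_max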

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase.py
new file mode 100644
index 0000000..95f3e30
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+
+from resource_management import *
+import sys
+
+def hbase(type=None):  # 'master', 'regionserver' or 'client'
+  import params
+  
+  Directory( params.conf_dir,
+      owner = params.hbase_user,
+      group = params.user_group,
+      recursive = True
+  )
+  
+  XmlConfig( "hbase-site.xml",
+            conf_dir = params.conf_dir,
+            configurations = params.config['configurations']['hbase-site'],
+            owner = params.hbase_user,
+            group = params.user_group
+  )
+
+  XmlConfig( "hdfs-site.xml",
+            conf_dir = params.conf_dir,
+            configurations = params.config['configurations']['hdfs-site'],
+            owner = params.hbase_user,
+            group = params.user_group
+  )
+  
+  if 'hbase-policy' in params.config['configurations']:
+    XmlConfig( "hbase-policy.xml",
+      configurations = params.config['configurations']['hbase-policy'],
+      owner = params.hbase_user,
+      group = params.user_group
+    )
+  # Manually overriding ownership of file installed by hadoop package
+  else: 
+    File( format("{conf_dir}/hbase-policy.xml"),
+      owner = params.hbase_user,
+      group = params.user_group
+    )
+  
+  hbase_TemplateConfig( 'hbase-env.sh')     
+       
+  hbase_TemplateConfig( params.metric_prop_file_name,
+    tag = 'GANGLIA-MASTER' if type == 'master' else 'GANGLIA-RS'
+  )
+
+  hbase_TemplateConfig( 'regionservers')
+
+  if params.security_enabled:
+    hbase_TemplateConfig( format("hbase_{type}_jaas.conf"))
+  
+  if type != "client":
+    Directory( params.pid_dir,
+      owner = params.hbase_user,
+      recursive = True
+    )
+  
+    Directory ( [params.tmp_dir, params.log_dir],
+      owner = params.hbase_user,
+      recursive = True
+    )
+
+  if params.log4j_props is not None:
+    PropertiesFile('log4j.properties',
+      dir=params.conf_dir,
+      properties=params.log4j_props,
+      mode=0664,
+      owner=params.hbase_user,
+      group=params.user_group,
+    )
+  elif os.path.exists(format("{params.conf_dir}/log4j.properties")):
+    File(format("{params.conf_dir}/log4j.properties"),
+      mode=0644,
+      group=params.user_group,
+      owner=params.hbase_user
+    )
+
+def hbase_TemplateConfig(name, 
+                         tag=None
+                         ):
+  import params
+
+  TemplateConfig( format("{conf_dir}/{name}"),
+      owner = params.hbase_user,
+      template_tag = tag
+  )
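
A note on the format() calls above: resource_management's format() resolves names
such as {conf_dir} and {name} from the calling scope and from the params registered
via env.set_params(params), so with conf_dir = "/etc/hbase/conf" from params.py
below, format("{conf_dir}/hbase-policy.xml") yields "/etc/hbase/conf/hbase-policy.xml".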

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_client.py
new file mode 100644
index 0000000..0f2a1bc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_client.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hbase import hbase
+
+         
+class HbaseClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    
+    hbase(type='client')
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+#for tests
+def main():
+  command_type = 'install'
+  command_data_file = '/root/workspace/HBase/input.json'
+  basedir = '/root/workspace/HBase/'
+  stdoutfile = '/1.txt'
+  sys.argv = ["", command_type, command_data_file, basedir, stdoutfile]
+  
+  HbaseClient().execute()
+  
+if __name__ == "__main__":
+  HbaseClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_decommission.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_decommission.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_decommission.py
new file mode 100644
index 0000000..dba89fa
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_decommission.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def hbase_decommission(env):
+  import params
+
+  env.set_params(params)
+
+  if params.hbase_drain_only:
+    print "TBD: Remove host from draining"
+
+  else:
+
+    kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_user};") if params.security_enabled else ""
+
+    hosts = params.hbase_excluded_hosts.split(",")
+    for host in hosts:
+      if host:
+        print "TBD: Add host to draining"
+        regionmover_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {conf_dir} org.jruby.Main {region_mover} unload {host}")
+
+        Execute(regionmover_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
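
With the paths from params.py below and security disabled (so kinit_cmd is empty),
the command built for each excluded host expands to roughly (host name hypothetical):

    /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/region_mover.rb unload host1.example.com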

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_master.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_master.py
new file mode 100644
index 0000000..9c78e5c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_master.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hbase import hbase
+from hbase_service import hbase_service
+from hbase_decommission import hbase_decommission
+
+         
+class HbaseMaster(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hbase(type='master')
+    
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # for security
+
+    hbase_service( 'master',
+      action = 'start'
+    )
+    
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    hbase_service( 'master',
+      action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/hbase-hbase-master.pid")
+    check_process_status(pid_file)
+
+  def decommission(self, env):
+    import params
+    env.set_params(params)
+
+    hbase_decommission(env)
+
+def main():
+  command_type = sys.argv[1] if len(sys.argv)>1 else "install"
+  print "Running "+command_type
+  command_data_file = '/var/lib/ambari-agent/data/command-3.json'
+  basedir = '/root/ambari/ambari-server/src/main/resources/stacks/HDP/2.0._/services/HBASE/package'
+  stroutputf = '/1.txt'
+  sys.argv = ["", command_type, command_data_file, basedir, stroutputf]
+  
+  HbaseMaster().execute()
+  
+if __name__ == "__main__":
+  HbaseMaster().execute()
+  #main()
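
status() only checks for a live pid: assuming a typical hbase_pid_dir of /var/run/hbase,
check_process_status() looks for

    /var/run/hbase/hbase-hbase-master.pid

and raises unless the process it names is still running.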

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_regionserver.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_regionserver.py
new file mode 100644
index 0000000..2d91e75
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_regionserver.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hbase import hbase
+from hbase_service import hbase_service
+
+         
+class HbaseRegionServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hbase(type='regionserver')
+      
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # for security
+
+    hbase_service( 'regionserver',
+      action = 'start'
+    )
+    
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    hbase_service( 'regionserver',
+      action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/hbase-hbase-regionserver.pid")
+    check_process_status(pid_file)
+    
+  def decommission(self, env):
+    print "Decommission not yet implemented!"
+    
+def main():
+  command_type = sys.argv[1] if len(sys.argv)>1 else "stop"
+  print "Running "+command_type
+  command_data_file = '/root/workspace/HBase/input.json'
+  basedir = '/root/workspace/HBase/main'
+  sys.argv = ["", command_type, command_data_file, basedir]
+  
+  HbaseRegionServer().execute()
+  
+if __name__ == "__main__":
+  HbaseRegionServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_service.py
new file mode 100644
index 0000000..7a1248b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_service.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+def hbase_service(
+  name,
+  action = 'start'): # 'start' or 'stop' or 'status'
+    
+    import params
+  
+    role = name
+    cmd = format("{daemon_script} --config {conf_dir}")
+    pid_file = format("{pid_dir}/hbase-hbase-{role}.pid")
+    
+    daemon_cmd = None
+    no_op_test = None
+    
+    if action == 'start':
+      daemon_cmd = format("{cmd} start {role}")
+      no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+    elif action == 'stop':
+      daemon_cmd = format("{cmd} stop {role} && rm -f {pid_file}")
+
+    if daemon_cmd is not None:
+      Execute ( daemon_cmd,
+        not_if = no_op_test,
+        user = params.hbase_user
+      )
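
For example, starting the master with the paths from params.py (pid_dir hypothetical)
reduces to:

    /usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf start master

with not_if ensuring it only runs when the pid-file test fails:

    ls /var/run/hbase/hbase-hbase-master.pid >/dev/null 2>&1 && ps `cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1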

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py
new file mode 100644
index 0000000..7db6306
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from functions import calc_xmn_from_xms
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+conf_dir = "/etc/hbase/conf"
+daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
+region_mover = "/usr/lib/hbase/bin/region_mover.rb"
+region_drainer = "/usr/lib/hbase/bin/region_drainer.rb"
+hbase_cmd = "/usr/lib/hbase/bin/hbase"
+hbase_excluded_hosts = default("/commandParams/excluded_hosts", "")
+hbase_drain_only = default("/commandParams/mark_draining_only", "")
+
+hbase_user = config['configurations']['global']['hbase_user']
+smokeuser = config['configurations']['global']['smokeuser']
+security_enabled = config['configurations']['global']['security_enabled']
+user_group = config['configurations']['global']['user_group']
+
+# this is "hadoop-metrics.properties" for 1.x stacks
+metric_prop_file_name = "hadoop-metrics2-hbase.properties"
+
+# not supporting 32 bit jdk.
+java64_home = config['hostLevelParams']['java_home']
+
+log_dir = config['configurations']['global']['hbase_log_dir']
+master_heapsize = config['configurations']['global']['hbase_master_heapsize']
+
+regionserver_heapsize = config['configurations']['global']['hbase_regionserver_heapsize']
+regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, 0.2, 512)
+
+pid_dir = status_params.pid_dir
+tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
+
+client_jaas_config_file = default('hbase_client_jaas_config_file', format("{conf_dir}/hbase_client_jaas.conf"))
+master_jaas_config_file = default('hbase_master_jaas_config_file', format("{conf_dir}/hbase_master_jaas.conf"))
+regionserver_jaas_config_file = default('hbase_regionserver_jaas_config_file', format("{conf_dir}/hbase_regionserver_jaas.conf"))
+
+ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', []) # is not passed when ganglia is not present
+ganglia_server_host = '' if len(ganglia_server_hosts) == 0 else ganglia_server_hosts[0]
+
+rs_hosts = default('hbase_rs_hosts', config['clusterHostInfo']['slave_hosts']) # if hbase_rs_hosts is not given, assume region servers run on the same nodes as the slaves
+
+smoke_test_user = config['configurations']['global']['smokeuser']
+smokeuser_permissions = default('smokeuser_permissions', "RWXCA")
+service_check_data = get_unique_id_and_date()
+
+if security_enabled:
+  
+  _use_hostname_in_principal = default('instance_name', True)
+  _master_primary_name = config['configurations']['global']['hbase_master_primary_name']
+  _hostname = config['hostname']
+  _kerberos_domain = config['configurations']['global']['kerberos_domain']
+  _master_principal_name = config['configurations']['global']['hbase_master_principal_name']
+  _regionserver_primary_name = config['configurations']['global']['hbase_regionserver_primary_name']
+  
+  if _use_hostname_in_principal:
+    master_jaas_princ = format("{_master_primary_name}/{_hostname}@{_kerberos_domain}")
+    regionserver_jaas_princ = format("{_regionserver_primary_name}/{_hostname}@{_kerberos_domain}")
+  else:
+    master_jaas_princ = format("{_master_principal_name}@{_kerberos_domain}")
+    regionserver_jaas_princ = format("{_regionserver_primary_name}@{_kerberos_domain}")
+    
+master_keytab_path = config['configurations']['hbase-site']['hbase.master.keytab.file']
+regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regionserver.keytab.file']
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+hbase_user_keytab = config['configurations']['global']['hbase_user_keytab']
+kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+
+#log4j.properties
+if ('hbase-log4j' in config['configurations']):
+  log4j_props = config['configurations']['hbase-log4j']
+else:
+  log4j_props = None
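
params.py assumes the command JSON delivered by the server has roughly this shape
(trimmed, values hypothetical):

    {
      "configurations": {
        "global":      {"hbase_user": "hbase", "user_group": "hadoop", ...},
        "hbase-site":  {"hbase.tmp.dir": "/hadoop/hbase", ...},
        "hdfs-site":   {...},
        "hbase-log4j": {...}
      },
      "hostLevelParams": {"java_home": "/usr/jdk64/jdk1.6.0_31"},
      "clusterHostInfo": {"slave_hosts": ["host1", "host2"]},
      "commandParams":   {"excluded_hosts": "", "mark_draining_only": ""}
    }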

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/service_check.py
new file mode 100644
index 0000000..ff6d0ed
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/service_check.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import functions
+
+
+class HbaseServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    
+    output_file = "/apps/hbase/data/ambarismoketest"
+    test_cmd = format("fs -test -e {output_file}")
+    kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smoke_test_user};") if params.security_enabled else ""
+    hbase_servicecheck_file = '/tmp/hbase-smoke.sh'
+  
+    File( '/tmp/hbaseSmokeVerify.sh',
+      content = StaticFile("hbaseSmokeVerify.sh"),
+      mode = 0755
+    )
+  
+    File( hbase_servicecheck_file,
+      mode = 0755,
+      content = Template('hbase-smoke.sh.j2')
+    )
+    
+    if params.security_enabled:    
+      hbase_grant_premissions_file = '/tmp/hbase_grant_permissions.sh'
+      hbase_kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_user};")
+      grantprivelegecmd = format("{hbase_kinit_cmd} hbase shell {hbase_grant_premissions_file}")
+  
+      File( hbase_grant_premissions_file,
+        owner   = params.hbase_user,
+        group   = params.user_group,
+        mode    = 0644,
+        content = Template('hbase_grant_permissions.j2')
+      )
+      
+      Execute( grantprivelegecmd,
+        user = params.hbase_user,
+      )
+
+    servicecheckcmd = format("{kinit_cmd} hbase --config {conf_dir} shell {hbase_servicecheck_file}")
+    smokeverifycmd = format("{kinit_cmd} /tmp/hbaseSmokeVerify.sh {conf_dir} {service_check_data}")
+  
+    Execute( servicecheckcmd,
+      tries     = 3,
+      try_sleep = 5,
+      user = params.smoke_test_user,
+      logoutput = True
+    )
+  
+    Execute ( smokeverifycmd,
+      tries     = 3,
+      try_sleep = 5,
+      user = params.smoke_test_user,
+      logoutput = True
+    )
+    
+def main():
+  import sys
+  command_type = 'perform'
+  command_data_file = '/root/workspace/HBase/input.json'
+  basedir = '/root/workspace/HBase/main'
+  sys.argv = ["", command_type, command_data_file, basedir]
+  
+  HbaseServiceCheck().execute()
+  
+if __name__ == "__main__":
+  HbaseServiceCheck().execute()
+  
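
With security disabled the check reduces to two shell steps (service_check_data is
a generated unique id):

    hbase --config /etc/hbase/conf shell /tmp/hbase-smoke.sh
    /tmp/hbaseSmokeVerify.sh /etc/hbase/conf <service_check_data>

i.e. write a row containing the unique id via the smoke script, then re-scan the
table to confirm the id is present.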

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/status_params.py
new file mode 100644
index 0000000..c9b20ef
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/status_params.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+pid_dir = config['configurations']['global']['hbase_pid_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
new file mode 100644
index 0000000..2583f44
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
@@ -0,0 +1,62 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+#
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is a hardcoded name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+hbase.period=10
+hbase.servers={{ganglia_server_host}}:8663
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+jvm.period=10
+jvm.servers={{ganglia_server_host}}:8663
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+rpc.period=10
+rpc.servers={{ganglia_server_host}}:8663
+
+#Ganglia following hadoop example
+hbase.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+hbase.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+hbase.sink.ganglia.servers={{ganglia_server_host}}:8663

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
new file mode 100644
index 0000000..9f2b616
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
@@ -0,0 +1,62 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+#
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is a hardcoded name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+hbase.period=10
+hbase.servers={{ganglia_server_host}}:8660
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+jvm.period=10
+jvm.servers={{ganglia_server_host}}:8660
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+rpc.period=10
+rpc.servers={{ganglia_server_host}}:8660
+
+#Ganglia following hadoop example
+hbase.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+hbase.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+hbase.sink.ganglia.servers={{ganglia_server_host}}:8660

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase-env.sh.j2
new file mode 100644
index 0000000..b8505b5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase-env.sh.j2
@@ -0,0 +1,82 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Set environment variables here.
+
+# The java implementation to use. Java 1.6 required.
+export JAVA_HOME={{java64_home}}
+
+# HBase Configuration directory
+export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{conf_dir}}}
+
+# Extra Java CLASSPATH elements. Optional.
+export HBASE_CLASSPATH=${HBASE_CLASSPATH}
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+# export HBASE_HEAPSIZE=1000
+
+# Extra Java runtime options.
+# Below are what we set by default. May only work with SUN JVM.
+# For more on why as well as other possible settings,
+# see http://wiki.apache.org/hadoop/PerformanceTuning
+export HBASE_OPTS="-XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log"
+export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`"
+# Uncomment below to enable java garbage collection logging.
+# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
+
+# Uncomment and adjust to enable JMX exporting
+# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
+# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+#
+# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
+export HBASE_MASTER_OPTS="-Xmx{{master_heapsize}}"
+export HBASE_REGIONSERVER_OPTS="-Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}"
+# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
+# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
+
+# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
+export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
+
+# Extra ssh options. Empty by default.
+# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
+
+# Where log files are stored. $HBASE_HOME/logs by default.
+export HBASE_LOG_DIR={{log_dir}}
+
+# A string representing this instance of hbase. $USER by default.
+# export HBASE_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+# export HBASE_NICENESS=10
+
+# The directory where pid files are stored. /tmp by default.
+export HBASE_PID_DIR={{pid_dir}}
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HBASE_SLAVE_SLEEP=0.1
+
+# Tell HBase whether it should manage its own instance of ZooKeeper or not.
+export HBASE_MANAGES_ZK=false
+
+{% if security_enabled %}
+export HBASE_OPTS="$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}}"
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}}"
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}}"
+{% endif %}
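
Tying this back to functions.py: with a hypothetical regionserver_heapsize of '1024m',
calc_xmn_from_xms('1024m', 0.2, 512) returns '200m', so the rendered line becomes:

    export HBASE_REGIONSERVER_OPTS="-Xmn200m -XX:CMSInitiatingOccupancyFraction=70  -Xms1024m -Xmx1024m"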

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase-smoke.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase-smoke.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase-smoke.sh.j2
new file mode 100644
index 0000000..61fe62f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase-smoke.sh.j2
@@ -0,0 +1,26 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+disable 'ambarismoketest'
+drop 'ambarismoketest'
+create 'ambarismoketest','family'
+put 'ambarismoketest','row01','family:col01','{{service_check_data}}'
+scan 'ambarismoketest'
+exit
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_client_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_client_jaas.conf.j2
new file mode 100644
index 0000000..696718e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_client_jaas.conf.j2
@@ -0,0 +1,5 @@
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=false
+useTicketCache=true;
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_grant_permissions.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_grant_permissions.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_grant_permissions.j2
new file mode 100644
index 0000000..9102d35
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_grant_permissions.j2
@@ -0,0 +1,21 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+grant '{{smoke_test_user}}', '{{smokeuser_permissions}}'
+exit
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_master_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_master_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_master_jaas.conf.j2
new file mode 100644
index 0000000..722cfcc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_master_jaas.conf.j2
@@ -0,0 +1,8 @@
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{master_keytab_path}}"
+principal="{{master_jaas_princ}}";
+};
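
Rendered with hypothetical values (params.py builds the principal as
primary/hostname@domain), the last two settings become:

    keyTab="/etc/security/keytabs/hbase.service.keytab"
    principal="hbase/host1.example.com@EXAMPLE.COM";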

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
new file mode 100644
index 0000000..cb9b7b0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
@@ -0,0 +1,8 @@
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{regionserver_keytab_path}}"
+principal="{{regionserver_jaas_princ}}";
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/regionservers.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/regionservers.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/regionservers.j2
new file mode 100644
index 0000000..b22ae5f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/regionservers.j2
@@ -0,0 +1,2 @@
+{% for host in rs_hosts %}{{host}}
+{% endfor %}
\ No newline at end of file
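
Rendered with a hypothetical rs_hosts of ['host1', 'host2'], this emits one region
server per line:

    host1
    host2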

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HCATALOG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HCATALOG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HCATALOG/metainfo.xml
deleted file mode 100644
index df5e901..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HCATALOG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is comment for HCATALOG service</comment>
-    <version>0.12.0.2.0.6.1</version>
-
-    <components>
-        <component>
-            <name>HCAT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-log4j.xml
new file mode 100644
index 0000000..4fd0b22
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-log4j.xml
@@ -0,0 +1,305 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hadoop.root.logger</name>
+    <value>INFO,console</value>
+  </property>
+  <property>
+    <name>hadoop.log.dir</name>
+    <value>.</value>
+  </property>
+  <property>
+    <name>hadoop.log.file</name>
+    <value>hadoop.log</value>
+  </property>
+  <property>
+    <name>log4j.rootLogger</name>
+    <value>${hadoop.root.logger}, EventCounter</value>
+  </property>
+  <property>
+    <name>log4j.threshhold</name>
+    <value>ALL</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA</name>
+    <value>org.apache.log4j.DailyRollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.File</name>
+    <value>${hadoop.log.dir}/${hadoop.log.file}</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.DatePattern</name>
+    <value>.yyyy-MM-dd</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFA.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %p %c: %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.console</name>
+    <value>org.apache.log4j.ConsoleAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.target</name>
+    <value>System.err</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.console.layout.ConversionPattern</name>
+    <value>%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n</value>
+  </property>
+  <property>
+    <name>hadoop.tasklog.taskid</name>
+    <value>null</value>
+  </property>
+  <property>
+    <name>hadoop.tasklog.iscleanup</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>hadoop.tasklog.noKeepSplits</name>
+    <value>4</value>
+  </property>
+  <property>
+    <name>hadoop.tasklog.totalLogFileSize</name>
+    <value>100</value>
+  </property>
+  <property>
+    <name>hadoop.tasklog.purgeLogSplits</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>hadoop.tasklog.logsRetainHours</name>
+    <value>12</value>
+  </property>
+  <property>
+    <name>log4j.appender.TLA</name>
+    <value>org.apache.hadoop.mapred.TaskLogAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.TLA.taskId</name>
+    <value>${hadoop.tasklog.taskid}</value>
+  </property>
+  <property>
+    <name>log4j.appender.TLA.isCleanup</name>
+    <value>${hadoop.tasklog.iscleanup}</value>
+  </property>
+  <property>
+    <name>log4j.appender.TLA.totalLogFileSize</name>
+    <value>${hadoop.tasklog.totalLogFileSize}</value>
+  </property>
+  <property>
+    <name>log4j.appender.TLA.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.TLA.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %p %c: %m%n</value>
+  </property>
+  <property>
+    <name>hadoop.security.logger</name>
+    <value>INFO,console</value>
+  </property>
+  <property>
+    <name>hadoop.security.log.maxfilesize</name>
+    <value>256MB</value>
+  </property>
+  <property>
+    <name>hadoop.security.log.maxbackupindex</name>
+    <value>20</value>
+  </property>
+  <property>
+    <name>log4j.category.SecurityLogger</name>
+    <value>${hadoop.security.logger}</value>
+  </property>
+  <property>
+    <name>hadoop.security.log.file</name>
+    <value>SecurityAuth.audit</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFAS</name>
+    <value>org.apache.log4j.DailyRollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFAS.File</name>
+    <value>${hadoop.log.dir}/${hadoop.security.log.file}</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFAS.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFAS.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %p %c: %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFAS.DatePattern</name>
+    <value>.yyyy-MM-dd</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFAS</name>
+    <value>org.apache.log4j.RollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFAS.File</name>
+    <value>${hadoop.log.dir}/${hadoop.security.log.file}</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFAS.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFAS.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %p %c: %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFAS.MaxFileSize</name>
+    <value>${hadoop.security.log.maxfilesize}</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFAS.MaxBackupIndex</name>
+    <value>${hadoop.security.log.maxbackupindex}</value>
+  </property>
+  <property>
+    <name>hdfs.audit.logger</name>
+    <value>INFO,console</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit</name>
+    <value>${hdfs.audit.logger}</value>
+  </property>
+  <property>
+    <name>log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFAAUDIT</name>
+    <value>org.apache.log4j.DailyRollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFAAUDIT.File</name>
+    <value>${hadoop.log.dir}/hdfs-audit.log</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFAAUDIT.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFAAUDIT.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %p %c{2}: %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.DRFAAUDIT.DatePattern</name>
+    <value>.yyyy-MM-dd</value>
+  </property>
+  <property>
+    <name>mapred.audit.logger</name>
+    <value>INFO,console</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.apache.hadoop.mapred.AuditLogger</name>
+    <value>${mapred.audit.logger}</value>
+  </property>
+  <property>
+    <name>log4j.additivity.org.apache.hadoop.mapred.AuditLogger</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>log4j.appender.MRAUDIT</name>
+    <value>org.apache.log4j.DailyRollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.MRAUDIT.File</name>
+    <value>${hadoop.log.dir}/mapred-audit.log</value>
+  </property>
+  <property>
+    <name>log4j.appender.MRAUDIT.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.MRAUDIT.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %p %c{2}: %m%n</value>
+  </property>
+  <property>
+    <name>log4j.appender.MRAUDIT.DatePattern</name>
+    <value>.yyyy-MM-dd</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA</name>
+    <value>org.apache.log4j.RollingFileAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA.File</name>
+    <value>${hadoop.log.dir}/${hadoop.log.file}</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA.MaxFileSize</name>
+    <value>256MB</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA.MaxBackupIndex</name>
+    <value>10</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA.layout</name>
+    <value>org.apache.log4j.PatternLayout</value>
+  </property>
+  <property>
+    <name>log4j.appender.RFA.layout.ConversionPattern</name>
+    <value>%d{ISO8601} %-5p %c{2} - %m%n</value>
+  </property>
+  <!-- More verbose debugging variant of the RFA conversion pattern; defining
+       the property twice would conflict: %d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n -->
+  <property>
+    <name>hadoop.metrics.log.level</name>
+    <value>INFO</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.apache.hadoop.metrics2</name>
+    <value>${hadoop.metrics.log.level}</value>
+  </property>
+  <property>
+    <name>log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service</name>
+    <value>ERROR</value>
+  </property>
+  <property>
+    <name>log4j.appender.NullAppender</name>
+    <value>org.apache.log4j.varia.NullAppender</value>
+  </property>
+  <property>
+    <name>log4j.appender.EventCounter</name>
+    <value>org.apache.hadoop.log.metrics.EventCounter</value>
+  </property>
+
+</configuration>
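
Each name/value pair above is rendered into plain "key=value" lines when the
agent generates log4j.properties. A minimal sketch of that transformation
(the function and input file name are illustrative, not Ambari's actual
template code):

import xml.etree.ElementTree as ET

def render_log4j(xml_path):
  # Emit one "name=value" line per <property> element under <configuration>.
  lines = []
  for prop in ET.parse(xml_path).getroot().findall("property"):
    lines.append("%s=%s" % (prop.findtext("name"),
                            prop.findtext("value", default="")))
  return "\n".join(lines)

print(render_log4j("hdfs-log4j.xml"))  # hypothetical input file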

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-site.xml
index aee8236..296b0c1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-site.xml
@@ -22,7 +22,7 @@
 
 <configuration>
 
-<!-- file system properties -->
+  <!-- file system properties -->
 
   <property>
     <name>dfs.namenode.name.dir</name>
@@ -60,11 +60,11 @@
     <name>dfs.datanode.data.dir</name>
     <value>/hadoop/hdfs/data</value>
     <description>Determines where on the local filesystem an DFS data node
-  should store its blocks.  If this is a comma-delimited
-  list of directories, then data will be stored in all named
-  directories, typically on different devices.
-  Directories that do not exist are ignored.
-  </description>
+      should store its blocks.  If this is a comma-delimited
+      list of directories, then data will be stored in all named
+      directories, typically on different devices.
+      Directories that do not exist are ignored.
+    </description>
     <final>true</final>
   </property>
 
@@ -72,21 +72,21 @@
     <name>dfs.hosts.exclude</name>
     <value>/etc/hadoop/conf/dfs.exclude</value>
     <description>Names a file that contains a list of hosts that are
-    not permitted to connect to the namenode.  The full pathname of the
-    file must be specified.  If the value is empty, no hosts are
-    excluded.</description>
+      not permitted to connect to the namenode.  The full pathname of the
+      file must be specified.  If the value is empty, no hosts are
+      excluded.</description>
   </property>
 
-<!--
-  <property>
-    <name>dfs.hosts</name>
-    <value>/etc/hadoop/conf/dfs.include</value>
-    <description>Names a file that contains a list of hosts that are
-    permitted to connect to the namenode. The full pathname of the file
-    must be specified.  If the value is empty, all hosts are
-    permitted.</description>
-  </property>
--->
+  <!--
+    <property>
+      <name>dfs.hosts</name>
+      <value>/etc/hadoop/conf/dfs.include</value>
+      <description>Names a file that contains a list of hosts that are
+      permitted to connect to the namenode. The full pathname of the file
+      must be specified.  If the value is empty, all hosts are
+      permitted.</description>
+    </property>
+  -->
 
   <property>
     <name>dfs.namenode.checkpoint.dir</name>
@@ -130,14 +130,14 @@
     <name>dfs.replication.max</name>
     <value>50</value>
     <description>Maximal block replication.
-  </description>
+    </description>
   </property>
 
   <property>
     <name>dfs.replication</name>
     <value>3</value>
     <description>Default block replication.
-  </description>
+    </description>
   </property>
 
   <property>
@@ -156,21 +156,21 @@
     <name>dfs.namenode.safemode.threshold-pct</name>
     <value>1.0f</value>
     <description>
-        Specifies the percentage of blocks that should satisfy
-        the minimal replication requirement defined by dfs.namenode.replication.min.
-        Values less than or equal to 0 mean not to start in safe mode.
-        Values greater than 1 will make safe mode permanent.
-        </description>
+      Specifies the percentage of blocks that should satisfy
+      the minimal replication requirement defined by dfs.namenode.replication.min.
+      Values less than or equal to 0 mean not to start in safe mode.
+      Values greater than 1 will make safe mode permanent.
+    </description>
   </property>
 
   <property>
     <name>dfs.datanode.balance.bandwidthPerSec</name>
     <value>6250000</value>
     <description>
-        Specifies the maximum amount of bandwidth that each datanode
-        can utilize for the balancing purpose in term of
-        the number of bytes per second.
-  </description>
+      Specifies the maximum amount of bandwidth that each datanode
+      can utilize for the balancing purpose in terms of
+      the number of bytes per second.
+    </description>
   </property>
 
   <property>
@@ -222,113 +222,113 @@
   <property>
     <name>dfs.namenode.http-address</name>
     <value>localhost:50070</value>
-<description>The name of the default file system.  Either the
-literal string "local" or a host:port for NDFS.</description>
-<final>true</final>
-</property>
-
-<property>
-<name>dfs.datanode.du.reserved</name>
-<!-- cluster variant -->
-<value>1073741824</value>
-<description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-</description>
-</property>
-
-<property>
-<name>dfs.datanode.ipc.address</name>
-<value>0.0.0.0:8010</value>
-<description>
-The datanode ipc server address and port.
-If the port is 0 then the server will start on a free port.
-</description>
-</property>
-
-<property>
-<name>dfs.blockreport.initialDelay</name>
-<value>120</value>
-<description>Delay for first block report in seconds.</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>40</value>
-<description>The number of server threads for the namenode.</description>
-</property>
-
-<property>
-<name>dfs.datanode.max.transfer.threads</name>
-<value>1024</value>
-<description>PRIVATE CONFIG VARIABLE</description>
-</property>
-
-<!-- Permissions configuration -->
-
-<property>
-<name>fs.permissions.umask-mode</name>
-<value>022</value>
-<description>
-The octal umask used when creating files and directories.
-</description>
-</property>
-
-<property>
-<name>dfs.permissions.enabled</name>
-<value>true</value>
-<description>
-If "true", enable permission checking in HDFS.
-If "false", permission checking is turned off,
-but all other behavior is unchanged.
-Switching from one parameter value to the other does not change the mode,
-owner or group of files or directories.
-</description>
-</property>
-
-<property>
-<name>dfs.permissions.superusergroup</name>
-<value>hdfs</value>
-<description>The name of the group of super-users.</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>100</value>
-<description>Added to grow Queue size so that more client connections are allowed</description>
-</property>
-
-<property>
-<name>dfs.block.access.token.enable</name>
-<value>true</value>
-<description>
-If "true", access tokens are used as capabilities for accessing datanodes.
-If "false", no access tokens are checked on accessing datanodes.
-</description>
-</property>
-
-<property>
-<name>dfs.namenode.kerberos.principal</name>
-<value></value>
-<description>
-Kerberos principal name for the NameNode
-</description>
-</property>
-
-<property>
-<name>dfs.secondary.namenode.kerberos.principal</name>
-<value></value>
+    <description>The address and the base port where the dfs namenode
+      web ui will listen on.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.du.reserved</name>
+    <!-- cluster variant -->
+    <value>1073741824</value>
+    <description>Reserved space in bytes per volume. Always leave this much space free for non-DFS use.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.ipc.address</name>
+    <value>0.0.0.0:8010</value>
     <description>
-        Kerberos principal name for the secondary NameNode.
+      The datanode ipc server address and port.
+      If the port is 0 then the server will start on a free port.
     </description>
   </property>
 
+  <property>
+    <name>dfs.blockreport.initialDelay</name>
+    <value>120</value>
+    <description>Delay for first block report in seconds.</description>
+  </property>
 
-<!--
-  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
--->
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>40</value>
+    <description>The number of server threads for the namenode.</description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.max.transfer.threads</name>
+    <value>1024</value>
+    <description>PRIVATE CONFIG VARIABLE</description>
+  </property>
+
+  <!-- Permissions configuration -->
+
+  <property>
+    <name>fs.permissions.umask-mode</name>
+    <value>022</value>
+    <description>
+      The octal umask used when creating files and directories.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.permissions.enabled</name>
+    <value>true</value>
+    <description>
+      If "true", enable permission checking in HDFS.
+      If "false", permission checking is turned off,
+      but all other behavior is unchanged.
+      Switching from one parameter value to the other does not change the mode,
+      owner or group of files or directories.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.permissions.superusergroup</name>
+    <value>hdfs</value>
+    <description>The name of the group of super-users.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>100</value>
+    <description>Added to grow the queue size so that more client connections are allowed</description>
+  </property>
+
+  <property>
+    <name>dfs.block.access.token.enable</name>
+    <value>true</value>
+    <description>
+      If "true", access tokens are used as capabilities for accessing datanodes.
+      If "false", no access tokens are checked on accessing datanodes.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.kerberos.principal</name>
+    <value></value>
+    <description>
+      Kerberos principal name for the NameNode
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.principal</name>
+    <value></value>
+    <description>
+      Kerberos principal name for the secondary NameNode.
+    </description>
+  </property>
+
+
+  <!--
+    This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
+  -->
   <property>
     <name>dfs.namenode.kerberos.https.principal</name>
     <value></value>
-     <description>The Kerberos principal for the host that the NameNode runs on.</description>
+    <description>The Kerberos principal for the host that the NameNode runs on.</description>
 
   </property>
 
@@ -368,64 +368,64 @@ Kerberos principal name for the NameNode
   <property>
     <name>dfs.datanode.kerberos.principal</name>
     <value></value>
- <description>
-        The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+    <description>
+      The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
     </description>
   </property>
 
   <property>
     <name>dfs.namenode.keytab.file</name>
     <value></value>
- <description>
-        Combined keytab file containing the namenode service and host principals.
+    <description>
+      Combined keytab file containing the namenode service and host principals.
     </description>
   </property>
 
   <property>
     <name>dfs.secondary.namenode.keytab.file</name>
     <value></value>
-  <description>
-        Combined keytab file containing the namenode service and host principals.
+    <description>
+      Combined keytab file containing the namenode service and host principals.
     </description>
   </property>
 
   <property>
     <name>dfs.datanode.keytab.file</name>
     <value></value>
- <description>
-        The filename of the keytab file for the DataNode.
+    <description>
+      The filename of the keytab file for the DataNode.
     </description>
   </property>
 
   <property>
     <name>dfs.namenode.https-address</name>
     <value>localhost:50470</value>
-  <description>The https address where namenode binds</description>
+    <description>The https address where namenode binds</description>
 
   </property>
 
   <property>
     <name>dfs.datanode.data.dir.perm</name>
     <value>750</value>
-<description>The permissions that should be there on dfs.datanode.data.dir
-directories. The datanode will not come up if the permissions are
-different on existing dfs.datanode.data.dir directories. If the directories
-don't exist, they will be created with this permission.</description>
+    <description>The permissions that should be there on dfs.datanode.data.dir
+      directories. The datanode will not come up if the permissions are
+      different on existing dfs.datanode.data.dir directories. If the directories
+      don't exist, they will be created with this permission.</description>
   </property>
 
   <property>
     <name>dfs.namenode.accesstime.precision</name>
     <value>0</value>
     <description>The access time for HDFS file is precise upto this value.
-                 The default value is 1 hour. Setting a value of 0 disables
-                 access times for HDFS.
+      The default value is 1 hour. Setting a value of 0 disables
+      access times for HDFS.
     </description>
   </property>
 
   <property>
-   <name>dfs.cluster.administrators</name>
-   <value> hdfs</value>
-   <description>ACL for who all can view the default servlets in the HDFS</description>
+    <name>dfs.cluster.administrators</name>
+    <value> hdfs</value>
+    <description>ACL that controls who can view the default servlets in HDFS</description>
   </property>
 
   <property>
@@ -458,14 +458,14 @@ don't exist, they will be created with this permission.</description>
     <value>30000</value>
     <description>Datanode is stale after not getting a heartbeat in this interval in ms</description>
   </property>
-  
+
   <property>
     <name>dfs.journalnode.http-address</name>
     <value>0.0.0.0:8480</value>
     <description>The address and port the JournalNode web UI listens on.
-     If the port is 0 then the server will start on a free port. </description>
+      If the port is 0 then the server will start on a free port. </description>
   </property>
-  
+
   <property>
     <name>dfs.journalnode.edits.dir</name>
     <value>/grid/0/hdfs/journal</value>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/metainfo.xml
index 2ca6c10..393d167 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/metainfo.xml
@@ -16,45 +16,138 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>root</user>
-    <comment>Apache Hadoop Distributed File System</comment>
-    <version>2.2.0.2.0.6.0</version>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <comment>Apache Hadoop Distributed File System</comment>
+      <version>2.1.0.2.1.1</version>
 
-    <components>
+      <components>
         <component>
-            <name>NAMENODE</name>
-            <category>MASTER</category>
+          <name>NAMENODE</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/namenode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/namenode.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
         </component>
 
         <component>
-            <name>DATANODE</name>
-            <category>SLAVE</category>
+          <name>DATANODE</name>
+          <category>SLAVE</category>
+          <commandScript>
+            <script>scripts/datanode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
 
         <component>
-            <name>SECONDARY_NAMENODE</name>
-            <category>MASTER</category>
+          <name>SECONDARY_NAMENODE</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/snamenode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
 
         <component>
-            <name>HDFS_CLIENT</name>
-            <category>CLIENT</category>
+          <name>HDFS_CLIENT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/hdfs_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
-        
+
         <component>
-            <name>JOURNALNODE</name>
-            <category>MASTER</category>
+          <name>JOURNALNODE</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/journalnode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
 
         <component>
           <name>ZKFC</name>
           <category>SLAVE</category>
+          <commandScript>
+            <script>scripts/zkfc_slave.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
-    </components>
-    <configuration-dependencies>
-      <config-type>core-site</config-type>
-      <config-type>global</config-type>
-      <config-type>hdfs-site</config-type>
-      <config-type>hadoop-policy</config-type>
-    </configuration-dependencies>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>lzo</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-libhdfs</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-lzo</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-lzo-native</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>snappy</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>snappy-devel</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>ambari-log4j</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>core-site</config-type>
+        <config-type>global</config-type>
+        <config-type>hdfs-site</config-type>
+        <config-type>hadoop-policy</config-type>
+        <config-type>hdfs-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
 </metainfo>
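
The DECOMMISSION entry above routes a custom command to the same
scripts/namenode.py used for lifecycle commands; custom commands surface as
lower-case methods on the Script subclass. A hedged sketch of the expected
shape (the method body is hypothetical; namenode.py itself is not part of
this excerpt):

from resource_management import *

class NameNode(Script):
  # install/start/stop/status omitted for brevity

  def decommission(self, env):
    import params
    env.set_params(params)
    # would refresh the exclude file and ask the NameNode to re-read it
    pass

if __name__ == "__main__":
  NameNode().execute()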

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkForFormat.sh
new file mode 100644
index 0000000..d14091a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkForFormat.sh
@@ -0,0 +1,64 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export hdfs_user=$1
+shift
+export conf_dir=$1
+shift
+export mark_dir=$1
+shift
+export name_dirs=$*
+
+export EXIT_CODE=0
+export command="namenode -format"
+export list_of_non_empty_dirs=""
+
+# Migrate the legacy marker file: if it exists, the NameNode was already
+# formatted, so carry that state over to the new marker directory.
+mark_file=/var/run/hadoop/hdfs/namenode-formatted
+if [[ -f ${mark_file} ]] ; then
+  rm -f ${mark_file}
+  mkdir -p ${mark_dir}
+fi
+
+if [[ ! -d $mark_dir ]] ; then
+  for dir in `echo $name_dirs | tr ',' ' '` ; do
+    echo "NameNode Dirname = $dir"
+    cmd="ls $dir | wc -l  | grep -q ^0$"
+    eval $cmd
+    if [[ $? -ne 0 ]] ; then
+      (( EXIT_CODE = $EXIT_CODE + 1 ))
+      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
+    fi
+  done
+
+  if [[ $EXIT_CODE == 0 ]] ; then
+    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
+  else
+    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
+  fi
+else
+  echo "${mark_dir} exists. Namenode DFS already formatted"
+fi
+
+exit $EXIT_CODE
+
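
The script consumes its positional arguments in the order of the shifts at
the top: user, conf dir, marker dir, then the name dirs. A hypothetical
invocation from Python (user and paths are examples, not taken from this
commit):

import subprocess

rc = subprocess.call([
  "sh", "checkForFormat.sh",
  "hdfs",                                     # hdfs_user
  "/etc/hadoop/conf",                         # conf_dir
  "/var/run/hadoop/hdfs/namenode-formatted",  # mark_dir
  "/hadoop/hdfs/namenode"                     # name_dirs (comma-separated)
])
print("format check exited with %d" % rc)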

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkWebUI.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkWebUI.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkWebUI.py
new file mode 100644
index 0000000..f8e9c1a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkWebUI.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import optparse
+import httplib
+
+#
+# Main.
+#
+def main():
+  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
+  parser.add_option("-m", "--hosts", dest="hosts", help="Comma separated hosts list for WEB UI to check it availability")
+  parser.add_option("-p", "--port", dest="port", help="Port of WEB UI to check it availability")
+
+  (options, args) = parser.parse_args()
+
+  hosts = options.hosts.split(',')
+  port = options.port
+
+  for host in hosts:
+    try:
+      conn = httplib.HTTPConnection(host, port)
+      # This can be modified to get a partial url part to be sent with request
+      conn.request("GET", "/")
+      httpCode = conn.getresponse().status
+      conn.close()
+    except Exception:
+      httpCode = 404
+
+    if httpCode != 200:
+      print "Cannot access WEB UI on: http://" + host + ":" + port
+      exit(1)
+
+
+if __name__ == "__main__":
+  main()
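
A hypothetical invocation (host names and port are placeholders); -m takes a
comma-separated host list, -p the port probed on each host, and any non-200
response or connection failure makes the script exit non-zero:

import subprocess

subprocess.check_call([
  "python", "checkWebUI.py",
  "-m", "c6401.ambari.apache.org,c6402.ambari.apache.org",
  "-p", "50070",
])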

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/datanode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/datanode.py
new file mode 100644
index 0000000..57fdb35
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/datanode.py
@@ -0,0 +1,58 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from hdfs_datanode import datanode
+
+
+class DataNode(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+    self.configure(env)
+    datanode(action="start")
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+    datanode(action="stop")
+
+  def configure(self, env):
+    import params
+
+    env.set_params(params)
+    datanode(action="configure")
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.datanode_pid_file)
+
+
+if __name__ == "__main__":
+  DataNode().execute()
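
DataNode().execute() hands control to resource_management, which dispatches
the command named on the command line to the matching method. Roughly (a
simplification with illustrative names, not the actual Script implementation,
which also loads a JSON command file and sets up an Environment):

import sys

class SimplifiedScript(object):
  def execute(self):
    command = sys.argv[1].lower()  # e.g. "START" -> "start"
    method = getattr(self, command)
    method(None)  # None stands in for the real Environment object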

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_client.py
new file mode 100644
index 0000000..ec24c7d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_client.py
@@ -0,0 +1,53 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+
+
+class HdfsClient(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+    self.config(env)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+  def config(self, env):
+    import params
+
+    # Intentionally a no-op: no client-specific setup is performed here.
+    pass
+
+
+if __name__ == "__main__":
+  HdfsClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_datanode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_datanode.py
new file mode 100644
index 0000000..f7d9f15
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_datanode.py
@@ -0,0 +1,48 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+
+
+def datanode(action=None):
+  import params
+
+  if action == "configure":
+    Directory(params.dfs_domain_socket_dir,
+              recursive=True,
+              mode=0750,
+              owner=params.hdfs_user,
+              group=params.user_group)
+    for data_dir in params.dfs_data_dir.split(","):
+      Directory(data_dir,
+                recursive=True,
+                mode=0755,
+                owner=params.hdfs_user,
+                group=params.user_group)
+
+  if action == "start":
+    service(
+      action=action, name="datanode",
+      user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True,
+      keytab=params.dfs_datanode_keytab_file,
+      principal=params.dfs_datanode_kerberos_principal
+    )
+  if action == "stop":
+    service(
+      action=action, name="datanode",
+      user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True,
+      keytab=params.dfs_datanode_keytab_file,
+      principal=params.dfs_datanode_kerberos_principal
+    )