Posted to commits@ambari.apache.org by jo...@apache.org on 2017/07/17 16:20:40 UTC

[22/26] ambari git commit: AMBARI-21474. HBase REST Server is missing after Ambari upgrade

AMBARI-21474. HBase REST Server is missing after Ambari upgrade


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b1c3784d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b1c3784d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b1c3784d

Branch: refs/heads/branch-feature-AMBARI-21348
Commit: b1c3784dad1676bad22514e520075ebc267463fc
Parents: 7764e38
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Fri Jul 14 13:28:59 2017 +0200
Committer: Attila Doroszlai <ad...@hortonworks.com>
Committed: Mon Jul 17 13:34:52 2017 +0200

----------------------------------------------------------------------
 .../HBASE/package/files/draining_servers.rb     | 164 +++++++
 .../HBASE/package/files/hbase-smoke-cleanup.sh  |  23 +
 .../HBASE/package/files/hbaseSmokeVerify.sh     |  34 ++
 .../services/HBASE/package/scripts/__init__.py  |  19 +
 .../services/HBASE/package/scripts/functions.py |  54 +++
 .../services/HBASE/package/scripts/hbase.py     | 234 ++++++++++
 .../HBASE/package/scripts/hbase_client.py       |  82 ++++
 .../HBASE/package/scripts/hbase_decommission.py |  93 ++++
 .../HBASE/package/scripts/hbase_master.py       | 163 +++++++
 .../HBASE/package/scripts/hbase_regionserver.py | 166 +++++++
 .../package/scripts/hbase_restgatewayserver.py  |  83 ++++
 .../HBASE/package/scripts/hbase_service.py      |  93 ++++
 .../HBASE/package/scripts/hbase_upgrade.py      |  41 ++
 .../services/HBASE/package/scripts/params.py    |  29 ++
 .../HBASE/package/scripts/params_linux.py       | 447 +++++++++++++++++++
 .../HBASE/package/scripts/params_windows.py     |  43 ++
 .../package/scripts/phoenix_queryserver.py      |  88 ++++
 .../HBASE/package/scripts/phoenix_service.py    |  55 +++
 .../HBASE/package/scripts/service_check.py      |  95 ++++
 .../HBASE/package/scripts/setup_ranger_hbase.py | 106 +++++
 .../HBASE/package/scripts/status_params.py      |  68 +++
 .../services/HBASE/package/scripts/upgrade.py   |  65 +++
 ...-metrics2-hbase.properties-GANGLIA-MASTER.j2 | 117 +++++
 ...doop-metrics2-hbase.properties-GANGLIA-RS.j2 | 116 +++++
 .../HBASE/package/templates/hbase-smoke.sh.j2   |  44 ++
 .../HBASE/package/templates/hbase.conf.j2       |  35 ++
 .../package/templates/hbase_client_jaas.conf.j2 |  23 +
 .../templates/hbase_grant_permissions.j2        |  40 ++
 .../package/templates/hbase_master_jaas.conf.j2 |  26 ++
 .../templates/hbase_queryserver_jaas.conf.j2    |  26 ++
 .../templates/hbase_regionserver_jaas.conf.j2   |  26 ++
 .../package/templates/hbase_rest_jaas.conf.j2   |  26 ++
 .../HBASE/package/templates/regionservers.j2    |  20 +
 33 files changed, 2744 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/files/draining_servers.rb
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/files/draining_servers.rb b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/files/draining_servers.rb
new file mode 100644
index 0000000..5bcb5b6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/files/draining_servers.rb
@@ -0,0 +1,164 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Add, remove, or list servers in draining mode via ZooKeeper
+
+require 'optparse'
+include Java
+
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.client.HBaseAdmin
+import org.apache.hadoop.hbase.zookeeper.ZKUtil
+import org.apache.commons.logging.Log
+import org.apache.commons.logging.LogFactory
+
+# Name of this script
+NAME = "draining_servers"
+
+# Do command-line parsing
+options = {}
+optparse = OptionParser.new do |opts|
+  opts.banner = "Usage: ./hbase org.jruby.Main #{NAME}.rb [options] add|remove|list <hostname>|<host:port>|<servername> ..."
+  opts.separator 'Add, remove, or list servers in draining mode. Accepts either a hostname, to drain all region servers ' +
+                 'on that host, a host:port pair, or a host,port,startCode triplet. More than one server can be given, separated by spaces.'
+  opts.on('-h', '--help', 'Display usage information') do
+    puts opts
+    exit
+  end
+  options[:debug] = false
+  opts.on('-d', '--debug', 'Display extra debug logging') do
+    options[:debug] = true
+  end
+end
+optparse.parse!
+
+# Return an array of server names, where each server name is a
+# comma-delimited hostname,port,startcode triplet
+def getServers(admin)
+  serverInfos = admin.getClusterStatus().getServerInfo()
+  servers = []
+  for server in serverInfos
+    servers << server.getServerName()
+  end
+  return servers
+end
+
+def getServerNames(hostOrServers, config)
+  ret = []
+  
+  for hostOrServer in hostOrServers
+    # check whether it is already serverName. No need to connect to cluster
+    parts = hostOrServer.split(',')
+    if parts.size() == 3
+      ret << hostOrServer
+    else 
+      admin = HBaseAdmin.new(config) if not admin
+      servers = getServers(admin)
+
+      hostOrServer = hostOrServer.gsub(/:/, ",")
+      for server in servers 
+        ret << server if server.start_with?(hostOrServer)
+      end
+    end
+  end
+  
+  admin.close() if admin
+  return ret
+end
+
+def addServers(options, hostOrServers)
+  config = HBaseConfiguration.create()
+  servers = getServerNames(hostOrServers, config)
+  
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
+  parentZnode = zkw.drainingZNode
+  
+  begin
+    for server in servers
+      node = ZKUtil.joinZNode(parentZnode, server)
+      ZKUtil.createAndFailSilent(zkw, node)
+    end
+  ensure
+    zkw.close()
+  end
+end
+
+def removeServers(options, hostOrServers)
+  config = HBaseConfiguration.create()
+  servers = getServerNames(hostOrServers, config)
+  
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
+  parentZnode = zkw.drainingZNode
+  
+  begin
+    for server in servers
+      node = ZKUtil.joinZNode(parentZnode, server)
+      ZKUtil.deleteNodeFailSilent(zkw, node)
+    end
+  ensure
+    zkw.close()
+  end
+end
+
+# list servers in draining mode
+def listServers(options)
+  config = HBaseConfiguration.create()
+  
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
+  parentZnode = zkw.drainingZNode
+
+  servers = ZKUtil.listChildrenNoWatch(zkw, parentZnode)
+  servers.each {|server| puts server}
+end
+
+hostOrServers = ARGV[1..ARGV.size()]
+
+# Create a logger and disable the noisy DEBUG-level client logging
+def configureLogging(options)
+  apacheLogger = LogFactory.getLog(NAME)
+  # Configure log4j to not spew so much
+  unless (options[:debug]) 
+    logger = org.apache.log4j.Logger.getLogger("org.apache.hadoop.hbase")
+    logger.setLevel(org.apache.log4j.Level::WARN)
+    logger = org.apache.log4j.Logger.getLogger("org.apache.zookeeper")
+    logger.setLevel(org.apache.log4j.Level::WARN)
+  end
+  return apacheLogger
+end
+
+# Create a logger and save it to ruby global
+$LOG = configureLogging(options)
+case ARGV[0]
+  when 'add'
+    if ARGV.length < 2
+      puts optparse
+      exit 1
+    end
+    addServers(options, hostOrServers)
+  when 'remove'
+    if ARGV.length < 2
+      puts optparse
+      exit 1
+    end
+    removeServers(options, hostOrServers)
+  when 'list'
+    listServers(options)
+  else
+    puts optparse
+    exit 3
+end

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/files/hbase-smoke-cleanup.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/files/hbase-smoke-cleanup.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/files/hbase-smoke-cleanup.sh
new file mode 100644
index 0000000..cde19e4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/files/hbase-smoke-cleanup.sh
@@ -0,0 +1,23 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+disable 'ambarismoketest'
+drop 'ambarismoketest'
+exit
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/files/hbaseSmokeVerify.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/files/hbaseSmokeVerify.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/files/hbaseSmokeVerify.sh
new file mode 100644
index 0000000..19276f3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/files/hbaseSmokeVerify.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+conf_dir=$1
+data=$2
+hbase_cmd=$3
+echo "scan 'ambarismoketest'" | $hbase_cmd --config $conf_dir shell > /tmp/hbase_chk_verify
+cat /tmp/hbase_chk_verify
+echo "Looking for $data"
+awk '/id/,/date/' /tmp/hbase_chk_verify
+if [ "$?" -ne 0 ]
+then
+  exit 1
+fi
+
+grep -q '1 row(s)' /tmp/hbase_chk_verify

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/__init__.py
new file mode 100644
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/functions.py
new file mode 100644
index 0000000..f98b9b9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/functions.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import re
+import math
+import datetime
+
+from resource_management.core.shell import checked_call
+
+def calc_xmn_from_xms(heapsize_str, xmn_percent, xmn_max):
+  """
+  @param heapsize_str: str (e.g '1000m')
+  @param xmn_percent: float (e.g 0.2)
+  @param xmn_max: integer (e.g 512)
+  """
+  heapsize = int(re.search('\d+',heapsize_str).group(0))
+  heapsize_unit = re.search('\D+',heapsize_str).group(0)
+  xmn_val = int(math.floor(heapsize*xmn_percent))
+  xmn_val -= xmn_val % 8
+  
+  result_xmn_val = xmn_max if xmn_val > xmn_max else xmn_val
+  return str(result_xmn_val) + heapsize_unit
+
+def ensure_unit_for_memory(memory_size):
+  memory_size_values = re.findall('\d+', str(memory_size))
+  memory_size_unit = re.findall('\D+', str(memory_size))
+
+  if len(memory_size_values) > 0:
+    unit = 'm'
+    if len(memory_size_unit) > 0:
+      unit = memory_size_unit[0]
+    if unit not in ['b', 'k', 'm', 'g', 't', 'p']:
+      raise Exception("Memory size unit error. %s - wrong unit" % unit)
+    return "%s%s" % (memory_size_values[0], unit)
+  else:
+    raise Exception('Memory size can not be calculated')
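
For reference, the Xmn arithmetic above can be exercised standalone; a
minimal sketch (plain Python, with illustrative inputs rather than any
stack default):

    import math
    import re

    def calc_xmn_from_xms(heapsize_str, xmn_percent, xmn_max):
        # Same logic as functions.py above: take xmn_percent of the heap,
        # round down to a multiple of 8, and cap at xmn_max.
        heapsize = int(re.search(r'\d+', heapsize_str).group(0))
        unit = re.search(r'\D+', heapsize_str).group(0)
        xmn_val = int(math.floor(heapsize * xmn_percent))
        xmn_val -= xmn_val % 8
        return str(min(xmn_val, xmn_max)) + unit

    print(calc_xmn_from_xms('1024m', 0.2, 512))  # '200m' (204 rounded down to 200)
    print(calc_xmn_from_xms('8192m', 0.2, 512))  # '512m' (capped at xmn_max)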

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase.py
new file mode 100644
index 0000000..837cf41
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase.py
@@ -0,0 +1,234 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+from resource_management import *
+import sys
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+
+from urlparse import urlparse
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def hbase(name=None):
+  import params
+  XmlConfig("hbase-site.xml",
+            conf_dir = params.hbase_conf_dir,
+            configurations = params.config['configurations']['hbase-site'],
+            configuration_attributes=params.config['configuration_attributes']['hbase-site']
+  )
+
+  if params.service_map.has_key(name):
+    # Manually overriding service logon user & password set by the installation package
+    service_name = params.service_map[name]
+    ServiceConfig(service_name,
+                  action="change_user",
+                  username = params.hbase_user,
+                  password = Script.get_password(params.hbase_user))
+
+# name is 'master' or 'regionserver' or 'queryserver' or 'client'
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def hbase(name=None):
+  import params
+
+  Directory( params.etc_prefix_dir,
+      mode=0755
+  )
+
+  Directory( params.hbase_conf_dir,
+      owner = params.hbase_user,
+      group = params.user_group,
+      create_parents = True
+  )
+   
+  Directory(params.java_io_tmpdir,
+      create_parents = True,
+      mode=0777
+  )
+
+  # If a file location is specified in the ioengine parameter,
+  # ensure that its directory exists, creating it with
+  # hbase:hadoop ownership if necessary.
+  ioengine_input = params.ioengine_param
+  if ioengine_input != None:
+    if ioengine_input.startswith("file:/"):
+      ioengine_fullpath = ioengine_input[5:]
+      ioengine_dir = os.path.dirname(ioengine_fullpath)
+      Directory(ioengine_dir,
+          owner = params.hbase_user,
+          group = params.user_group,
+          create_parents = True,
+          mode = 0755
+      )
+  
+  parent_dir = os.path.dirname(params.tmp_dir)
+  # In case there are several placeholders in the path
+  while ("${" in parent_dir):
+    parent_dir = os.path.dirname(parent_dir)
+  if parent_dir != os.path.abspath(os.sep) :
+    Directory (parent_dir,
+          create_parents = True,
+          cd_access="a",
+    )
+    Execute(("chmod", "1777", parent_dir), sudo=True)
+
+  XmlConfig( "hbase-site.xml",
+            conf_dir = params.hbase_conf_dir,
+            configurations = params.config['configurations']['hbase-site'],
+            configuration_attributes=params.config['configuration_attributes']['hbase-site'],
+            owner = params.hbase_user,
+            group = params.user_group
+  )
+
+  XmlConfig( "core-site.xml",
+             conf_dir = params.hbase_conf_dir,
+             configurations = params.config['configurations']['core-site'],
+             configuration_attributes=params.config['configuration_attributes']['core-site'],
+             owner = params.hbase_user,
+             group = params.user_group
+  )
+
+  if 'hdfs-site' in params.config['configurations']:
+    XmlConfig( "hdfs-site.xml",
+            conf_dir = params.hbase_conf_dir,
+            configurations = params.config['configurations']['hdfs-site'],
+            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+            owner = params.hbase_user,
+            group = params.user_group
+    )
+
+    XmlConfig("hdfs-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['hdfs-site'],
+            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+            owner=params.hdfs_user,
+            group=params.user_group
+    )
+
+  if 'hbase-policy' in params.config['configurations']:
+    XmlConfig( "hbase-policy.xml",
+            conf_dir = params.hbase_conf_dir,
+            configurations = params.config['configurations']['hbase-policy'],
+            configuration_attributes=params.config['configuration_attributes']['hbase-policy'],
+            owner = params.hbase_user,
+            group = params.user_group
+    )
+  # Manually overriding ownership of file installed by hadoop package
+  else: 
+    File( format("{params.hbase_conf_dir}/hbase-policy.xml"),
+      owner = params.hbase_user,
+      group = params.user_group
+    )
+
+  File(format("{hbase_conf_dir}/hbase-env.sh"),
+       owner = params.hbase_user,
+       content=InlineTemplate(params.hbase_env_sh_template),
+       group = params.user_group,
+  )
+  
+  # On some OSes this folder may not exist, so create it before placing files in it
+  Directory(params.limits_conf_dir,
+            create_parents = True,
+            owner='root',
+            group='root'
+            )
+  
+  File(os.path.join(params.limits_conf_dir, 'hbase.conf'),
+       owner='root',
+       group='root',
+       mode=0644,
+       content=Template("hbase.conf.j2")
+       )
+    
+  hbase_TemplateConfig( params.metric_prop_file_name,
+    tag = 'GANGLIA-MASTER' if name == 'master' else 'GANGLIA-RS'
+  )
+
+  hbase_TemplateConfig( 'regionservers')
+
+  if params.security_enabled:
+    hbase_TemplateConfig( format("hbase_{name}_jaas.conf"))
+  
+  if name != "client":
+    Directory( params.pid_dir,
+      owner = params.hbase_user,
+      create_parents = True,
+      cd_access = "a",
+      mode = 0755,
+    )
+  
+    Directory (params.log_dir,
+      owner = params.hbase_user,
+      create_parents = True,
+      cd_access = "a",
+      mode = 0755,
+    )
+
+  if (params.log4j_props != None):
+    File(format("{params.hbase_conf_dir}/log4j.properties"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.hbase_user,
+         content=params.log4j_props
+    )
+  elif (os.path.exists(format("{params.hbase_conf_dir}/log4j.properties"))):
+    File(format("{params.hbase_conf_dir}/log4j.properties"),
+      mode=0644,
+      group=params.user_group,
+      owner=params.hbase_user
+    )
+  if name == "master":
+    if not params.hbase_root_dir_scheme or params.hbase_root_dir_scheme == urlparse(params.default_fs).scheme:  # if hbase.rootdir has no scheme specified or is in HDFS
+      # Create hbase.rootdir
+      params.HdfsResource(params.hbase_hdfs_root_dir,
+                          type="directory",
+                          action="create_on_execute",
+                          owner=params.hbase_user
+      )
+    else:  # hbase.rootdir is not in HDFS
+      # Do NOT create hbase.rootdir, but only log a message
+      Logger.info(format("hbase.rootdir '{params.hbase_hdfs_root_dir}' not created, as its location is not in HDFS."))
+
+    params.HdfsResource(params.hbase_staging_dir,
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.hbase_user,
+                         mode=0711
+    )
+    if params.create_hbase_home_directory:
+      params.HdfsResource(params.hbase_home_directory,
+                          type="directory",
+                          action="create_on_execute",
+                          owner=params.hbase_user,
+                          mode=0755
+      )
+    params.HdfsResource(None, action="execute")
+
+  if params.phoenix_enabled:
+    Package(params.phoenix_package,
+            retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
+            retry_count=params.agent_stack_retry_count)
+
+def hbase_TemplateConfig(name, tag=None):
+  import params
+
+  TemplateConfig( format("{hbase_conf_dir}/{name}"),
+      owner = params.hbase_user,
+      template_tag = tag
+  )
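
The parent-directory loop above deliberately walks upward past any
unresolved ${...} placeholders before applying chmod 1777. A minimal
standalone sketch of that traversal (the path is hypothetical):

    import os

    def resolved_parent(tmp_dir):
        # Mirror the loop in hbase(): climb until no placeholder remains.
        parent = os.path.dirname(tmp_dir)
        while "${" in parent:
            parent = os.path.dirname(parent)
        return parent

    print(resolved_parent('/grid/${custom.root}/hbase/tmp'))  # '/grid'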

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_client.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_client.py
new file mode 100644
index 0000000..f18a96a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_client.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from hbase import hbase
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+from resource_management.core.exceptions import ClientComponentHasNoStatus
+
+class HbaseClient(Script):
+  def install(self, env):
+    import params
+    env.set_params(params)
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hbase(name='client')
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HbaseClientWindows(HbaseClient):
+  pass
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HbaseClientDefault(HbaseClient):
+  def get_component_name(self):
+    return "hbase-client"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version): 
+      conf_select.select(params.stack_name, "hbase", params.version)
+      stack_select.select("hbase-client", params.version)
+
+      # phoenix may not always be deployed
+      try:
+        stack_select.select("phoenix-client", params.version)
+      except Exception as e:
+        print "Ignoring error due to missing phoenix-client"
+        print str(e)
+
+
+      # set all of the hadoop clients since hbase client is upgraded as part
+      # of the final "CLIENTS" group and we need to ensure that hadoop-client
+      # is also set
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-client", params.version)
+
+
+if __name__ == "__main__":
+  HbaseClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_decommission.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_decommission.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_decommission.py
new file mode 100644
index 0000000..022465a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_decommission.py
@@ -0,0 +1,93 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def hbase_decommission(env):
+  import params
+
+  env.set_params(params)
+  File(params.region_drainer, content=StaticFile("draining_servers.rb"), owner=params.hbase_user, mode="f")
+
+  hosts = params.hbase_excluded_hosts.split(",")
+  for host in hosts:
+    if host:
+      if params.hbase_drain_only == True:
+        regiondrainer_cmd = format("cmd /c {hbase_executable} org.jruby.Main {region_drainer} remove {host}")
+        Execute(regiondrainer_cmd, user=params.hbase_user, logoutput=True)
+      else:
+        regiondrainer_cmd = format("cmd /c {hbase_executable} org.jruby.Main {region_drainer} add {host}")
+        regionmover_cmd = format("cmd /c {hbase_executable} org.jruby.Main {region_mover} unload {host}")
+        Execute(regiondrainer_cmd, user=params.hbase_user, logoutput=True)
+        Execute(regionmover_cmd, user=params.hbase_user, logoutput=True)
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def hbase_decommission(env):
+  import params
+
+  env.set_params(params)
+  kinit_cmd = params.kinit_cmd_master
+
+  File(params.region_drainer,
+       content=StaticFile("draining_servers.rb"),
+       mode=0755
+  )
+  
+  if params.hbase_excluded_hosts and params.hbase_excluded_hosts.split(","):
+    hosts = params.hbase_excluded_hosts.split(",")
+  elif params.hbase_included_hosts and params.hbase_included_hosts.split(","):
+    hosts = params.hbase_included_hosts.split(",")
+
+  if params.hbase_drain_only:
+    for host in hosts:
+      if host:
+        regiondrainer_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} {master_security_config} org.jruby.Main {region_drainer} remove {host}")
+        Execute(regiondrainer_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
+        pass
+    pass
+
+  else:
+    for host in hosts:
+      if host:
+        regiondrainer_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} {master_security_config} org.jruby.Main {region_drainer} add {host}")
+        regionmover_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} {master_security_config} org.jruby.Main {region_mover} unload {host}")
+
+        Execute(regiondrainer_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
+
+        Execute(regionmover_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
+      pass
+    pass
+  pass
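
The host selection above gives the excluded list precedence over the
included one; a minimal sketch of that precedence (host names are
hypothetical). As a side observation, if both lists are empty, hosts
is never assigned in the function above:

    def hosts_to_process(excluded, included):
        # Excluded hosts win; otherwise fall back to included hosts.
        if excluded:
            return excluded.split(",")
        if included:
            return included.split(",")
        return []

    print(hosts_to_process("rs1.example.com,rs2.example.com", ""))
    # ['rs1.example.com', 'rs2.example.com']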

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_master.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_master.py
new file mode 100644
index 0000000..ae0e0d9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_master.py
@@ -0,0 +1,163 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+
+from resource_management.libraries.script.script import Script
+from resource_management.core.resources.service import Service
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.format import format
+from hbase import hbase
+from hbase_service import hbase_service
+from hbase_decommission import hbase_decommission
+import upgrade
+from setup_ranger_hbase import setup_ranger_hbase
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+if OSCheck.is_windows_family():
+  from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
+
+class HbaseMaster(Script):
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hbase(name='master')
+
+  def install(self, env):
+    import params
+    env.set_params(params)
+    self.install_packages(env)
+
+  def decommission(self, env):
+    import params
+    env.set_params(params)
+    hbase_decommission(env)
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HbaseMasterWindows(HbaseMaster):
+  def start(self, env):
+    import status_params
+    self.configure(env)
+    Service(status_params.hbase_master_win_service_name, action="start")
+
+  def stop(self, env):
+    import status_params
+    env.set_params(status_params)
+    Service(status_params.hbase_master_win_service_name, action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_windows_service_status(status_params.hbase_master_win_service_name)
+
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HbaseMasterDefault(HbaseMaster):
+  def get_component_name(self):
+    return "hbase-master"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    upgrade.prestart(env, "hbase-master")
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env) # for security
+    setup_ranger_hbase(upgrade_type=upgrade_type, service_name="hbase-master")
+    hbase_service('master', action='start')
+    
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    hbase_service('master', action = 'stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/hbase-{hbase_user}-master.pid")
+    check_process_status(pid_file)
+
+  def security_status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      props_value_check = {"hbase.security.authentication" : "kerberos",
+                           "hbase.security.authorization": "true"}
+      props_empty_check = ['hbase.master.keytab.file',
+                           'hbase.master.kerberos.principal']
+      props_read_check = ['hbase.master.keytab.file']
+      hbase_site_expectations = build_expectations('hbase-site', props_value_check, props_empty_check,
+                                                  props_read_check)
+
+      hbase_expectations = {}
+      hbase_expectations.update(hbase_site_expectations)
+
+      security_params = get_params_from_filesystem(status_params.hbase_conf_dir,
+                                                   {'hbase-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, hbase_expectations)
+      if not result_issues:  # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ( 'hbase-site' not in security_params
+               or 'hbase.master.keytab.file' not in security_params['hbase-site']
+               or 'hbase.master.kerberos.principal' not in security_params['hbase-site']):
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hbase_user,
+                                security_params['hbase-site']['hbase.master.keytab.file'],
+                                security_params['hbase-site']['hbase.master.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+      
+  def get_log_folder(self):
+    import params
+    return params.log_dir
+  
+  def get_user(self):
+    import params
+    return params.hbase_user
+
+if __name__ == "__main__":
+  HbaseMaster().execute()
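
security_status above follows a declare-read-validate pattern: build
the expected property values, read hbase-site.xml from disk, then
report any mismatches. A hand-rolled sketch of the validation step
(hypothetical config dict; the real helpers live in
resource_management.libraries.functions.security_commons):

    expectations = {
        'hbase-site': {
            'hbase.security.authentication': 'kerberos',
            'hbase.security.authorization': 'true',
        }
    }

    def validate(actual, expected):
        # Return {file: reason} for each failed expectation, loosely
        # mirroring validate_security_config_properties().
        issues = {}
        for fname, props in expected.items():
            site = actual.get(fname, {})
            for key, value in props.items():
                if site.get(key) != value:
                    issues[fname] = "property %s should be %s" % (key, value)
        return issues

    actual = {'hbase-site': {'hbase.security.authentication': 'simple'}}
    print(validate(actual, expectations))
    # e.g. {'hbase-site': 'property hbase.security.authorization should be true'}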

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_regionserver.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_regionserver.py
new file mode 100644
index 0000000..370167b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_regionserver.py
@@ -0,0 +1,166 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+
+from resource_management import *
+from resource_management.core import shell
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+from hbase import hbase
+from hbase_service import hbase_service
+import upgrade
+from setup_ranger_hbase import setup_ranger_hbase
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+
+class HbaseRegionServer(Script):
+  def install(self, env):
+    import params
+    env.set_params(params)
+    self.install_packages(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hbase(name='regionserver')
+
+  def decommission(self, env):
+    print "Decommission not yet implemented!"
+
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HbaseRegionServerWindows(HbaseRegionServer):
+  def start(self, env):
+    import status_params
+    self.configure(env)
+    Service(status_params.hbase_regionserver_win_service_name, action="start")
+
+  def stop(self, env):
+    import status_params
+    env.set_params(status_params)
+    Service(status_params.hbase_regionserver_win_service_name, action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_windows_service_status(status_params.hbase_regionserver_win_service_name)
+
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HbaseRegionServerDefault(HbaseRegionServer):
+  def get_component_name(self):
+    return "hbase-regionserver"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    upgrade.prestart(env, "hbase-regionserver")
+
+  def post_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    upgrade.post_regionserver(env)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env) # for security
+    setup_ranger_hbase(upgrade_type=upgrade_type, service_name="hbase-regionserver")
+
+    hbase_service('regionserver', action='start')
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    hbase_service( 'regionserver',
+      action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/hbase-{hbase_user}-regionserver.pid")
+    check_process_status(pid_file)
+
+  def security_status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      props_value_check = {"hbase.security.authentication" : "kerberos",
+                           "hbase.security.authorization": "true"}
+      props_empty_check = ['hbase.regionserver.keytab.file',
+                           'hbase.regionserver.kerberos.principal']
+      props_read_check = ['hbase.regionserver.keytab.file']
+      hbase_site_expectations = build_expectations('hbase-site', props_value_check, props_empty_check,
+                                                   props_read_check)
+
+      hbase_expectations = {}
+      hbase_expectations.update(hbase_site_expectations)
+
+      security_params = get_params_from_filesystem(status_params.hbase_conf_dir,
+                                                   {'hbase-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, hbase_expectations)
+      if not result_issues:  # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ( 'hbase-site' not in security_params
+               or 'hbase.regionserver.keytab.file' not in security_params['hbase-site']
+               or 'hbase.regionserver.kerberos.principal' not in security_params['hbase-site']):
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hbase_user,
+                                security_params['hbase-site']['hbase.regionserver.keytab.file'],
+                                security_params['hbase-site']['hbase.regionserver.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+  def get_log_folder(self):
+    import params
+    return params.log_dir
+  
+  def get_user(self):
+    import params
+    return params.hbase_user
+
+if __name__ == "__main__":
+  HbaseRegionServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_restgatewayserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_restgatewayserver.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_restgatewayserver.py
new file mode 100644
index 0000000..1d028d6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_restgatewayserver.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hbase import hbase
+from hbase_service import hbase_service
+from hbase_decommission import hbase_decommission
+import upgrade
+
+class HbaseRestGatewayServer(Script):
+  def get_component_name(self):
+    return "hbase-restserver"
+
+  def install(self, env):
+    self.install_packages(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hbase(name='rest')
+    
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    upgrade.prestart(env, "hbase-restserver")
+
+  def post_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env) # for security
+
+    hbase_service( 'rest',
+      action = 'start'
+    )
+    
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    hbase_service( 'rest',
+      action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/hbase-{hbase_user}-rest.pid")
+    check_process_status(pid_file)
+
+  def decommission(self, env):
+    import params
+    env.set_params(params)
+
+    hbase_decommission(env)
+
+
+if __name__ == "__main__":
+  HbaseRestGatewayServer().execute()
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_service.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_service.py
new file mode 100644
index 0000000..41daaeb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_service.py
@@ -0,0 +1,93 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from datetime import datetime
+
+from resource_management.core.resources.system import Execute
+from resource_management.core.resources.system import File
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.core.shell import as_sudo
+from resource_management.libraries.functions.show_logs import show_logs
+from resource_management.core.logger import Logger
+
+def hbase_service(name, action = 'start'):
+    import params
+  
+    role = name
+    cmd = format("{daemon_script} --config {hbase_conf_dir}")
+    pid_file = format("{pid_dir}/hbase-{hbase_user}-{role}.pid")
+    pid_expression = as_sudo(["cat", pid_file])
+    no_op_test = as_sudo(["test", "-f", pid_file]) + format(" && ps -p `{pid_expression}` >/dev/null 2>&1")
+    
+    # back up the WAL directory if the HBase version has been downgraded
+    if params.to_backup_wal_dir:
+      wal_directory = params.wal_directory
+      timestamp = datetime.now()
+      timestamp_format = '%Y%m%d%H%M%S'
+      wal_directory_backup = '%s_%s' % (wal_directory, timestamp.strftime(timestamp_format))
+
+      check_if_wal_dir_exists = format("hdfs dfs -ls {wal_directory}")
+      wal_dir_exists = False
+      try:
+        Execute(check_if_wal_dir_exists,
+                user=params.hbase_user
+                )
+        wal_dir_exists = True
+      except Exception, e:
+        Logger.error(format("Did not find HBase WAL directory {wal_directory}. It's possible that it was already moved. Exception: {e.message}"))
+
+      if wal_dir_exists:
+        move_wal_dir_cmd = format("hdfs dfs -mv {wal_directory} {wal_directory_backup}")
+        try:
+          Execute(move_wal_dir_cmd,
+            user=params.hbase_user
+          )
+        except Exception, e:
+          Logger.error(format("Failed to backup HBase WAL directory, command: {move_wal_dir_cmd} . Exception: {e.message}"))
+
+    if action == 'start':
+      daemon_cmd = format("{cmd} start {role}")
+      
+      try:
+        Execute ( daemon_cmd,
+          not_if = no_op_test,
+          user = params.hbase_user
+        )
+      except:
+        show_logs(params.log_dir, params.hbase_user)
+        raise
+    elif action == 'stop':
+      daemon_cmd = format("{cmd} stop {role}")
+
+      try:
+        Execute ( daemon_cmd,
+          user = params.hbase_user,
+          only_if = no_op_test,
+          # BUGFIX: hbase regionserver sometimes hangs when nn is in safemode
+          timeout = params.hbase_regionserver_shutdown_timeout,
+          on_timeout = format("! ( {no_op_test} ) || {sudo} -H -E kill -9 `{pid_expression}`"),
+        )
+      except:
+        show_logs(params.log_dir, params.hbase_user)
+        raise
+      
+      File(pid_file,
+           action = "delete",
+      )
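
The WAL handling above backs the directory up by renaming it with a
timestamp suffix rather than deleting it. A minimal sketch of the name
it produces (the path is illustrative):

    from datetime import datetime

    wal_directory = '/apps/hbase/data/WALs'  # illustrative, not a stack default
    backup = '%s_%s' % (wal_directory, datetime.now().strftime('%Y%m%d%H%M%S'))
    print(backup)  # e.g. /apps/hbase/data/WALs_20170717133452
    # which is then applied with: hdfs dfs -mv <wal_directory> <backup>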

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_upgrade.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_upgrade.py
new file mode 100644
index 0000000..2dc9883
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/hbase_upgrade.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management.libraries.script import Script
+from resource_management.core.resources.system import Execute
+
+class HbaseMasterUpgrade(Script):
+
+  def take_snapshot(self, env):
+    import params
+
+    snap_cmd = "echo 'snapshot_all' | {0} shell".format(params.hbase_cmd)
+
+    exec_cmd = "{0} {1}".format(params.kinit_cmd, snap_cmd)
+
+    Execute(exec_cmd, user=params.hbase_user)
+
+  def restore_snapshot(self, env):
+    import params
+    print "TODO AMBARI-12698"
+
+if __name__ == "__main__":
+  HbaseMasterUpgrade().execute()
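
take_snapshot above simply pipes one command into the HBase shell under
a kinit prefix; the assembled command line looks like this (both paths
are illustrative, not taken from params):

    kinit_cmd = "kinit -kt /etc/security/keytabs/hbase.headless.keytab hbase;"  # illustrative
    hbase_cmd = "/usr/iop/current/hbase-client/bin/hbase"                       # illustrative
    snap_cmd = "echo 'snapshot_all' | {0} shell".format(hbase_cmd)
    exec_cmd = "{0} {1}".format(kinit_cmd, snap_cmd)
    print(exec_cmd)
    # kinit ... hbase; echo 'snapshot_all' | /usr/iop/current/hbase-client/bin/hbase shell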

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/params.py
new file mode 100644
index 0000000..f3208ce
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/params.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from ambari_commons import OSCheck
+from resource_management.libraries.functions.default import default
+
+if OSCheck.is_windows_family():
+  from params_windows import *
+else:
+  from params_linux import *
+
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+retryAble = default("/commandParams/command_retry_enabled", False)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/params_linux.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/params_linux.py
new file mode 100644
index 0000000..bff022d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/params_linux.py
@@ -0,0 +1,447 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+import status_params
+import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
+
+from urlparse import urlparse
+
+from functions import calc_xmn_from_xms, ensure_unit_for_memory
+
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from ambari_commons.os_check import OSCheck
+from ambari_commons.str_utils import string_set_intersection
+
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.stack_features import get_stack_feature_version
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions import is_empty
+from resource_management.libraries.functions import get_unique_id_and_date
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.expect import expect
+from resource_management.libraries.functions.constants import Direction
+
+# server configurations
+config = Script.get_config()
+exec_tmp_dir = Script.get_tmp_dir()
+sudo = AMBARI_SUDO_BINARY
+
+stack_name = status_params.stack_name
+agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
+agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
+version = default("/commandParams/version", None)
+component_directory = status_params.component_directory
+etc_prefix_dir = "/etc/hbase"
+
+stack_version_unformatted = status_params.stack_version_unformatted
+stack_version_formatted = status_params.stack_version_formatted
+stack_root = status_params.stack_root
+
+# get the correct version to use for checking stack features
+version_for_stack_feature_checks = get_stack_feature_version(config)
+
+stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
+stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks)
+
+# hadoop default parameters
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
+region_mover = "/usr/lib/hbase/bin/region_mover.rb"
+region_drainer = "/usr/lib/hbase/bin/draining_servers.rb"
+hbase_cmd = "/usr/lib/hbase/bin/hbase"
+hbase_max_direct_memory_size = None
+
+# hadoop parameters for stacks supporting rolling_upgrade
+if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
+  daemon_script = format('{stack_root}/current/hbase-client/bin/hbase-daemon.sh')
+  region_mover = format('{stack_root}/current/hbase-client/bin/region_mover.rb')
+  region_drainer = format('{stack_root}/current/hbase-client/bin/draining_servers.rb')
+  hbase_cmd = format('{stack_root}/current/hbase-client/bin/hbase')
+
+  hbase_max_direct_memory_size = default('/configurations/hbase-env/hbase_max_direct_memory_size', None)
+
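+  # prefer the component-specific bin directory (e.g. hbase-master) over the generic hbase-client one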
+  tmp_daemon_script = format("{stack_root}/current/{component_directory}/bin/hbase-daemon.sh")
+  if os.path.exists(tmp_daemon_script):
+    daemon_script = tmp_daemon_script
+
+  region_mover = format("{stack_root}/current/{component_directory}/bin/region_mover.rb")
+  region_drainer = format("{stack_root}/current/{component_directory}/bin/draining_servers.rb")
+  hbase_cmd = format("{stack_root}/current/{component_directory}/bin/hbase")
+
+
+hbase_conf_dir = status_params.hbase_conf_dir
+limits_conf_dir = status_params.limits_conf_dir
+
+hbase_user_nofile_limit = default("/configurations/hbase-env/hbase_user_nofile_limit", "32000")
+hbase_user_nproc_limit = default("/configurations/hbase-env/hbase_user_nproc_limit", "16000")
+
+# no symlink for phoenix-server at this point
+phx_daemon_script = format('{stack_root}/current/phoenix-server/bin/queryserver.py')
+
+hbase_excluded_hosts = config['commandParams']['excluded_hosts']
+hbase_drain_only = default("/commandParams/mark_draining_only", False)
+hbase_included_hosts = config['commandParams']['included_hosts']
+
+hbase_user = status_params.hbase_user
+hbase_principal_name = config['configurations']['hbase-env']['hbase_principal_name']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+_authentication = config['configurations']['core-site']['hadoop.security.authentication']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+# this is "hadoop-metrics.properties" for 1.x stacks
+metric_prop_file_name = "hadoop-metrics2-hbase.properties"
+
+# 32-bit JDK is not supported.
+java64_home = config['hostLevelParams']['java_home']
+java_version = expect("/hostLevelParams/java_version", int)
+
+log_dir = config['configurations']['hbase-env']['hbase_log_dir']
+java_io_tmpdir = default("/configurations/hbase-env/hbase_java_io_tmpdir", "/tmp")
+master_heapsize = ensure_unit_for_memory(config['configurations']['hbase-env']['hbase_master_heapsize'])
+
+regionserver_heapsize = ensure_unit_for_memory(config['configurations']['hbase-env']['hbase_regionserver_heapsize'])
+regionserver_xmn_max = config['configurations']['hbase-env']['hbase_regionserver_xmn_max']
+regionserver_xmn_percent = expect("/configurations/hbase-env/hbase_regionserver_xmn_ratio", float)
+regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, regionserver_xmn_percent, regionserver_xmn_max)
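+# illustrative: an 8192m heap with a 0.2 ratio yields a young generation of roughly 1638m, subject to the regionserver_xmn_max cap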
+
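+# graceful-stop timeout consumed by hbase_service.py before a hung regionserver is force-killed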
+hbase_regionserver_shutdown_timeout = expect('/configurations/hbase-env/hbase_regionserver_shutdown_timeout', int, 30)
+
+phoenix_hosts = default('/clusterHostInfo/phoenix_query_server_hosts', [])
+phoenix_enabled = default('/configurations/hbase-env/phoenix_sql_enabled', False)
+has_phoenix = len(phoenix_hosts) > 0
+
+underscored_version = stack_version_unformatted.replace('.', '_')
+dashed_version = stack_version_unformatted.replace('.', '-')
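+# rpm/zypper package names use underscores in the version, apt (deb) names use dashes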
+if OSCheck.is_redhat_family() or OSCheck.is_suse_family():
+  phoenix_package = format("phoenix_{underscored_version}_*")
+elif OSCheck.is_ubuntu_family():
+  phoenix_package = format("phoenix-{dashed_version}-.*")
+
+pid_dir = status_params.pid_dir
+tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
+local_dir = config['configurations']['hbase-site']['hbase.local.dir']
+ioengine_param = default('/configurations/hbase-site/hbase.bucketcache.ioengine', None)
+
+client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
+master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
+regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf")
+rest_jaas_config_file = format("{hbase_conf_dir}/hbase_rest_jaas.conf")
+queryserver_jaas_config_file = format("{hbase_conf_dir}/hbase_queryserver_jaas.conf")
+
+ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', []) # is not passed when ganglia is not present
+has_ganglia_server = len(ganglia_server_hosts) > 0
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+
+ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
+has_metric_collector = len(ams_collector_hosts) > 0
+if has_metric_collector:
+  if 'cluster-env' in config['configurations'] and \
+      'metrics_collector_vip_host' in config['configurations']['cluster-env']:
+    metric_collector_host = config['configurations']['cluster-env']['metrics_collector_vip_host']
+  else:
+    metric_collector_host = ams_collector_hosts[0]
+  if 'cluster-env' in config['configurations'] and \
+      'metrics_collector_vip_port' in config['configurations']['cluster-env']:
+    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
+  else:
+    metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "localhost:6188")
+    if metric_collector_web_address.find(':') != -1:
+      metric_collector_port = metric_collector_web_address.split(':')[1]
+    else:
+      metric_collector_port = '6188'
+  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+    metric_collector_protocol = 'https'
+  else:
+    metric_collector_protocol = 'http'
+  metric_truststore_path = default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
+  metric_truststore_type = default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
+  metric_truststore_password = default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
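+  # the truststore settings above are only relevant when the collector protocol is https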
+
+metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
+metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
+
+# if HBase is selected, hbase_rs_hosts should not be empty, but default just in case
+if 'slave_hosts' in config['clusterHostInfo']:
+  # if hbase_rs_hosts is not given, region servers are assumed to run on the slave nodes
+  rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', default('/clusterHostInfo/slave_hosts', []))
+else:
+  rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', default('/clusterHostInfo/all_hosts', []))
+
+smoke_test_user = config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
+smokeuser_permissions = "RWXCA"
+
+titan_user = default('/configurations/titan-env/titan_user', 'titan')
+titan_user_permissions = "RWC"
+titan_user_hbase_namespace = "@default"
+
+service_check_data = get_unique_id_and_date()
+user_group = config['configurations']['cluster-env']["user_group"]
+
+if security_enabled:
+  _hostname_lowercase = config['hostname'].lower()
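+  # Kerberos principals carry a _HOST placeholder, which is replaced with the lowercased local hostname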
+  master_jaas_princ = config['configurations']['hbase-site']['hbase.master.kerberos.principal'].replace('_HOST',_hostname_lowercase)
+  master_keytab_path = config['configurations']['hbase-site']['hbase.master.keytab.file']
+  regionserver_jaas_princ = config['configurations']['hbase-site']['hbase.regionserver.kerberos.principal'].replace('_HOST',_hostname_lowercase)
+  _rest_server_jaas_princ = config['configurations']['hbase-site']['hbase.rest.kerberos.principal']
+  _rest_server_spnego_jaas_princ = config['configurations']['hbase-site']['hbase.rest.authentication.kerberos.principal']
+  _queryserver_jaas_princ = config['configurations']['hbase-site']['phoenix.queryserver.kerberos.principal']
+  if not is_empty(_queryserver_jaas_princ):
+    queryserver_jaas_princ = _queryserver_jaas_princ.replace('_HOST',_hostname_lowercase)
+  if not is_empty(_rest_server_jaas_princ):
+    rest_server_jaas_princ = _rest_server_jaas_princ.replace('_HOST',_hostname_lowercase)
+  if not is_empty(_rest_server_spnego_jaas_princ):
+    rest_server_spnego_jaas_princ = _rest_server_spnego_jaas_princ.replace('_HOST',_hostname_lowercase)
+
+regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regionserver.keytab.file']
+rest_server_keytab_path = config['configurations']['hbase-site']['hbase.rest.keytab.file']
+rest_server_spnego_keytab_path = config['configurations']['hbase-site']['hbase.rest.authentication.kerberos.keytab']
+queryserver_keytab_path = config['configurations']['hbase-site']['phoenix.queryserver.keytab.file']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+hbase_user_keytab = config['configurations']['hbase-env']['hbase_user_keytab']
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+if security_enabled:
+  kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_principal_name};")
+  kinit_cmd_master = format("{kinit_path_local} -kt {master_keytab_path} {master_jaas_princ};")
+  master_security_config = format("-Djava.security.auth.login.config={hbase_conf_dir}/hbase_master_jaas.conf")
+else:
+  kinit_cmd = ""
+  kinit_cmd_master = ""
+  master_security_config = ""
+
+#log4j.properties
+if 'hbase-log4j' in config['configurations'] and 'content' in config['configurations']['hbase-log4j']:
+  log4j_props = config['configurations']['hbase-log4j']['content']
+else:
+  log4j_props = None
+
+hbase_env_sh_template = config['configurations']['hbase-env']['content']
+
+hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']
+hbase_root_dir_scheme = urlparse(hbase_hdfs_root_dir).scheme
+
+hbase_staging_dir = "/apps/hbase/staging"
+#for create_hdfs_directory
+hostname = config["hostname"]
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+dfs_type = default("/commandParams/dfs_type", "")
+
+import functools
+# create partial functions with common arguments for every HdfsResource call;
+# code that creates/deletes HDFS directories/files or copies from local calls params.HdfsResource
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
+)
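+# illustrative usage: callers elsewhere can now write, e.g.
+#   params.HdfsResource(params.hbase_staging_dir, type="directory", action="create_on_execute", owner=params.hbase_user)
+#   params.HdfsResource(None, action="execute")
+# without repeating the keytab/principal/conf plumbing bound above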
+
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = len(ranger_admin_hosts) > 0
+xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+
+# ranger hbase properties
+policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
+if policymgr_mgr_url.endswith('/'):
+  policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
+xa_audit_db_name = default('/configurations/admin-properties/audit_db_name', 'ranger_audits')
+xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger')
+xa_db_host = config['configurations']['admin-properties']['db_host']
+repo_name = str(config['clusterName']) + '_hbase'
+
+common_name_for_certificate = config['configurations']['ranger-hbase-plugin-properties']['common.name.for.certificate']
+
+zookeeper_znode_parent = config['configurations']['hbase-site']['zookeeper.znode.parent']
+hbase_zookeeper_quorum = config['configurations']['hbase-site']['hbase.zookeeper.quorum']
+hbase_zookeeper_property_clientPort = config['configurations']['hbase-site']['hbase.zookeeper.property.clientPort']
+hbase_security_authentication = config['configurations']['hbase-site']['hbase.security.authentication']
+hadoop_security_authentication = config['configurations']['core-site']['hadoop.security.authentication']
+
+repo_config_username = config['configurations']['ranger-hbase-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
+
+ranger_env = config['configurations']['ranger-env']
+ranger_plugin_properties = config['configurations']['ranger-hbase-plugin-properties']
+policy_user = config['configurations']['ranger-hbase-plugin-properties']['policy_user']
+
+# for the curl command the Ranger plugin uses to download the DB connector jar
+jdk_location = config['hostLevelParams']['jdk_location']
+java_share_dir = '/usr/share/java'
+enable_ranger_hbase = False
+if has_ranger_admin:
+  enable_ranger_hbase = (config['configurations']['ranger-hbase-plugin-properties']['ranger-hbase-plugin-enabled'].lower() == 'yes')
+  xa_audit_db_password = ''
+  if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db:
+    xa_audit_db_password = unicode(config['configurations']['admin-properties']['audit_db_password'])
+  repo_config_password = unicode(config['configurations']['ranger-hbase-plugin-properties']['REPOSITORY_CONFIG_PASSWORD'])
+  xa_audit_db_flavor = (config['configurations']['admin-properties']['DB_FLAVOR']).lower()
+  previous_jdbc_jar_name = None
+
+  if stack_supports_ranger_audit_db:
+    if xa_audit_db_flavor == 'mysql':
+      jdbc_jar_name = default("/hostLevelParams/custom_mysql_jdbc_name", None)
+      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mysql_jdbc_name", None)
+      audit_jdbc_url = format('jdbc:mysql://{xa_db_host}/{xa_audit_db_name}')
+      jdbc_driver = "com.mysql.jdbc.Driver"
+    elif xa_audit_db_flavor == 'oracle':
+      jdbc_jar_name = default("/hostLevelParams/custom_oracle_jdbc_name", None)
+      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_oracle_jdbc_name", None)
+      colon_count = xa_db_host.count(':')
+      if colon_count == 2 or colon_count == 0:
+        audit_jdbc_url = format('jdbc:oracle:thin:@{xa_db_host}')
+      else:
+        audit_jdbc_url = format('jdbc:oracle:thin:@//{xa_db_host}')
+      jdbc_driver = "oracle.jdbc.OracleDriver"
+    elif xa_audit_db_flavor == 'postgres':
+      jdbc_jar_name = default("/hostLevelParams/custom_postgres_jdbc_name", None)
+      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_postgres_jdbc_name", None)
+      audit_jdbc_url = format('jdbc:postgresql://{xa_db_host}/{xa_audit_db_name}')
+      jdbc_driver = "org.postgresql.Driver"
+    elif xa_audit_db_flavor == 'mssql':
+      jdbc_jar_name = default("/hostLevelParams/custom_mssql_jdbc_name", None)
+      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_mssql_jdbc_name", None)
+      audit_jdbc_url = format('jdbc:sqlserver://{xa_db_host};databaseName={xa_audit_db_name}')
+      jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
+    elif xa_audit_db_flavor == 'sqla':
+      jdbc_jar_name = default("/hostLevelParams/custom_sqlanywhere_jdbc_name", None)
+      previous_jdbc_jar_name = default("/hostLevelParams/previous_custom_sqlanywhere_jdbc_name", None)
+      audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
+      jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
+
+  downloaded_custom_connector = format("{exec_tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+  driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+  driver_curl_target = format("{stack_root}/current/{component_directory}/lib/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+  previous_jdbc_jar = format("{stack_root}/current/{component_directory}/lib/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None
+  sql_connector_jar = ''
+
+  if security_enabled:
+    master_principal = config['configurations']['hbase-site']['hbase.master.kerberos.principal']
+
+  hbase_ranger_plugin_config = {
+    'username': repo_config_username,
+    'password': repo_config_password,
+    'hadoop.security.authentication': hadoop_security_authentication,
+    'hbase.security.authentication': hbase_security_authentication,
+    'hbase.zookeeper.property.clientPort': hbase_zookeeper_property_clientPort,
+    'hbase.zookeeper.quorum': hbase_zookeeper_quorum,
+    'zookeeper.znode.parent': zookeeper_znode_parent,
+    'commonNameForCertificate': common_name_for_certificate,
+    'hbase.master.kerberos.principal': master_principal if security_enabled else ''
+  }
+
+  hbase_ranger_plugin_repo = {
+    'isActive': 'true',
+    'config': json.dumps(hbase_ranger_plugin_config),
+    'description': 'hbase repo',
+    'name': repo_name,
+    'repositoryType': 'hbase',
+    'assetType': '2'
+  }
+
+  if stack_supports_ranger_kerberos and security_enabled:
+    hbase_ranger_plugin_config['policy.download.auth.users'] = hbase_user
+    hbase_ranger_plugin_config['tag.download.auth.users'] = hbase_user
+    hbase_ranger_plugin_config['policy.grantrevoke.auth.users'] = hbase_user
+
+  if stack_supports_ranger_kerberos:
+    hbase_ranger_plugin_config['ambari.service.check.user'] = policy_user
+
+    hbase_ranger_plugin_repo = {
+      'isEnabled': 'true',
+      'configs': hbase_ranger_plugin_config,
+      'description': 'hbase repo',
+      'name': repo_name,
+      'type': 'hbase'
+    }
+
+  ranger_hbase_principal = None
+  ranger_hbase_keytab = None
+  if stack_supports_ranger_kerberos and security_enabled and 'hbase-master' in component_directory.lower():
+    ranger_hbase_principal = master_jaas_princ
+    ranger_hbase_keytab = master_keytab_path
+  elif stack_supports_ranger_kerberos and security_enabled and 'hbase-regionserver' in component_directory.lower():
+    ranger_hbase_principal = regionserver_jaas_princ
+    ranger_hbase_keytab = regionserver_keytab_path
+
+  xa_audit_db_is_enabled = False
+  ranger_audit_solr_urls = config['configurations']['ranger-admin-site']['ranger.audit.solr.urls']
+  if xml_configurations_supported and stack_supports_ranger_audit_db:
+    xa_audit_db_is_enabled = config['configurations']['ranger-hbase-audit']['xasecure.audit.destination.db']
+  xa_audit_hdfs_is_enabled = config['configurations']['ranger-hbase-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else None
+  ssl_keystore_password = unicode(config['configurations']['ranger-hbase-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']) if xml_configurations_supported else None
+  ssl_truststore_password = unicode(config['configurations']['ranger-hbase-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
+  credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None
+
+  # for SQL Anywhere (SQLA), explicitly disable Ranger audit to DB
+  if xa_audit_db_flavor == 'sqla':
+    xa_audit_db_is_enabled = False
+
+
+create_hbase_home_directory = check_stack_feature(StackFeature.HBASE_HOME_DIRECTORY, stack_version_formatted)
+hbase_home_directory = format("/user/{hbase_user}")
+
+atlas_hosts = default('/clusterHostInfo/atlas_server_hosts', [])
+has_atlas = len(atlas_hosts) > 0
+
+metadata_user = default('/configurations/atlas-env/metadata_user', None)
+atlas_graph_storage_hostname = default('/configurations/application-properties/atlas.graph.storage.hostname', None)
+atlas_graph_storage_hbase_table = default('/configurations/application-properties/atlas.graph.storage.hbase.table', None)
+atlas_audit_hbase_tablename = default('/configurations/application-properties/atlas.audit.hbase.tablename', None)
+
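+# Atlas is treated as using this cluster's managed HBase when its graph-storage ZooKeeper hosts overlap HBase's quorum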
+if has_atlas:
+  zk_hosts_matches = string_set_intersection(atlas_graph_storage_hostname, hbase_zookeeper_quorum)
+  atlas_with_managed_hbase = len(zk_hosts_matches) > 0
+else:
+  atlas_with_managed_hbase = False
+
+wal_directory = "/apps/hbase/data/MasterProcWALs"
+
+backup_wal_dir = default('/configurations/hbase-env/backup_wal_dir', False)
+
+# make sure we do not keep removing WAL logs once the Express Upgrade (EU) is finalized.
+upgrade_direction = default("/commandParams/upgrade_direction", None)
+to_backup_wal_dir = upgrade_direction is not None and upgrade_direction == Direction.UPGRADE and backup_wal_dir

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/params_windows.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/params_windows.py
new file mode 100644
index 0000000..7d634cb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/params_windows.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import os
+import status_params
+
+# server configurations
+config = Script.get_config()
+hbase_conf_dir = os.environ["HBASE_CONF_DIR"]
+hbase_bin_dir = os.path.join(os.environ["HBASE_HOME"],'bin')
+hbase_executable = os.path.join(hbase_bin_dir,"hbase.cmd")
+stack_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
+hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
+hbase_user = hadoop_user
+
+#decomm params
+region_drainer = os.path.join(hbase_bin_dir,"draining_servers.rb")
+region_mover = os.path.join(hbase_bin_dir,"region_mover.rb")
+hbase_excluded_hosts = config['commandParams']['excluded_hosts']
+hbase_drain_only = config['commandParams']['mark_draining_only']
+
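+# map the role names used by the scripts to the corresponding Windows service names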
+service_map = {
+  'master' : status_params.hbase_master_win_service_name,
+  'regionserver' : status_params.hbase_regionserver_win_service_name
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1c3784d/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/phoenix_queryserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/phoenix_queryserver.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/phoenix_queryserver.py
new file mode 100644
index 0000000..82113e9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/package/scripts/phoenix_queryserver.py
@@ -0,0 +1,88 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.script import Script
+from phoenix_service import phoenix_service
+from hbase import hbase
+
+# Note: Phoenix Query Server is only applicable to stack versions that support Phoenix.
+class PhoenixQueryServer(Script):
+
+  def install(self, env):
+    import params
+    env.set_params(params)
+    self.install_packages(env)
+
+
+  def get_component_name(self):
+    return "phoenix-server"
+
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hbase(name='queryserver')
+
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    phoenix_service('start')
+
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    phoenix_service('stop')
+
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.stack_version_formatted and check_stack_feature(StackFeature.PHOENIX, params.stack_version_formatted):
+      # phoenix uses hbase configs
+      conf_select.select(params.stack_name, "hbase", params.version)
+      stack_select.select("phoenix-server", params.version)
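+      # conf-select/stack-select repoint the active config and binary symlinks at the target stack version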
+
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    phoenix_service('status')
+
+
+  def security_status(self, env):
+    self.put_structured_out({"securityState": "UNSECURED"})
+
+  def get_log_folder(self):
+    import params
+    return params.log_dir
+
+  def get_user(self):
+    import params
+    return params.hbase_user
+
+if __name__ == "__main__":
+  PhoenixQueryServer().execute()
\ No newline at end of file