Posted to commits@ambari.apache.org by al...@apache.org on 2015/04/16 00:55:37 UTC

[1/8] ambari git commit: AMBARI-9993. Add support for management of Phoenix Query Server to HDP Stack (Nick Dimiduk via alejandro)

Repository: ambari
Updated Branches:
  refs/heads/trunk b58969228 -> 546475471


http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/test/python/stacks/2.3/configs/hbase_secure.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/configs/hbase_secure.json b/ambari-server/src/test/python/stacks/2.3/configs/hbase_secure.json
new file mode 100644
index 0000000..ea57e66
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.3/configs/hbase_secure.json
@@ -0,0 +1,738 @@
+{
+    "configuration_attributes": {
+        "ranger-knox-plugin-properties": {}, 
+        "gateway-log4j": {}, 
+        "ranger-hdfs-plugin-properties": {}, 
+        "hbase-policy": {}, 
+        "kerberos-env": {}, 
+        "storm-site": {}, 
+        "hdfs-site": {}, 
+        "storm-env": {}, 
+        "hbase-site": {}, 
+        "knox-env": {}, 
+        "hadoop-policy": {}, 
+        "hdfs-log4j": {}, 
+        "ranger-hbase-plugin-properties": {}, 
+        "krb5-conf": {}, 
+        "ldap-log4j": {}, 
+        "core-site": {}, 
+        "hadoop-env": {}, 
+        "zookeeper-log4j": {}, 
+        "topology": {}, 
+        "hbase-log4j": {}, 
+        "oozie-site": {}, 
+        "gateway-site": {}, 
+        "hbase-env": {}, 
+        "zookeeper-env": {}, 
+        "zoo.cfg": {}, 
+        "ranger-storm-plugin-properties": {}, 
+        "webhcat-site": {}, 
+        "users-ldif": {}, 
+        "cluster-env": {}
+    }, 
+    "commandParams": {
+        "service_package_folder": "common-services/HBASE/0.96.0.2.0/package", 
+        "script": "scripts/hbase_regionserver.py", 
+        "hooks_folder": "HDP/2.0.6/hooks", 
+        "version": "2.3.0.0-1606", 
+        "excluded_hosts": "host1",
+        "command_timeout": "900", 
+        "script_type": "PYTHON"
+    }, 
+    "roleCommand": "CUSTOM_COMMAND", 
+    "kerberosCommandParams": [], 
+    "clusterName": "c1", 
+    "hostname": "c6405.ambari.apache.org", 
+    "hostLevelParams": {
+        "jdk_location": "http://c6405.ambari.apache.org:8080/resources/", 
+        "ambari_db_rca_password": "mapred", 
+        "java_home": "/usr/jdk64/jdk1.8.0_40", 
+        "ambari_db_rca_url": "jdbc:postgresql://c6405.ambari.apache.org/ambarirca", 
+        "jce_name": "jce_policy-8.zip", 
+        "custom_command": "RESTART", 
+        "oracle_jdbc_url": "http://c6405.ambari.apache.org:8080/resources//ojdbc6.jar", 
+        "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.3.0.0-1606\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.3\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.3.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.3.0.0-1606\",\"baseSaved\":true},{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.20\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"baseSaved\":true}]", 
+        "group_list": "[\"hadoop\",\"users\",\"knox\"]", 
+        "agentCacheDir": "/var/lib/ambari-agent/cache", 
+        "stack_version": "2.3", 
+        "stack_name": "HDP", 
+        "db_name": "ambari", 
+        "jdk_name": "jdk-8u40-linux-x64.tar.gz", 
+        "ambari_db_rca_driver": "org.postgresql.Driver", 
+        "java_version": "8", 
+        "ambari_db_rca_username": "mapred", 
+        "db_driver_filename": "mysql-connector-java.jar", 
+        "user_list": "[\"storm\",\"zookeeper\",\"ambari-qa\",\"hdfs\",\"hbase\",\"knox\"]", 
+        "mysql_jdbc_url": "http://c6405.ambari.apache.org:8080/resources//mysql-connector-java.jar", 
+        "clientsToUpdateConfigs": "[\"*\"]"
+    }, 
+    "commandType": "EXECUTION_COMMAND", 
+    "roleParams": {
+        "component_category": "SLAVE"
+    }, 
+    "serviceName": "HBASE", 
+    "role": "HBASE_REGIONSERVER", 
+    "forceRefreshConfigTags": [], 
+    "taskId": 115, 
+    "public_hostname": "c6405.ambari.apache.org", 
+    "configurations": {
+        "ranger-knox-plugin-properties": {
+            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 
+            "KNOX_HOME": "/usr/hdp/current/knox-server", 
+            "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", 
+            "common.name.for.certificate": "-", 
+            "XAAUDIT.HDFS.IS_ENABLED": "false", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
+            "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
+            "XAAUDIT.DB.IS_ENABLED": "true", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", 
+            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
+            "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
+            "ranger-knox-plugin-enabled": "No", 
+            "policy_user": "ambari-qa", 
+            "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", 
+            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", 
+            "SSL_TRUSTSTORE_PASSWORD": "changeit", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", 
+            "REPOSITORY_CONFIG_USERNAME": "admin", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", 
+            "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", 
+            "REPOSITORY_CONFIG_PASSWORD": "admin-password"
+        }, 
+        "gateway-log4j": {
+            "content": "\n\n      # Licensed to the Apache Software Foundation (ASF) under one\n      # or more contributor license agreements. See the NOTICE file\n      # distributed with this work for additional information\n      # regarding copyright ownership. The ASF licenses this file\n      # to you under the Apache License, Version 2.0 (the\n      # \"License\"); you may not use this file except in compliance\n      # with the License. You may obtain a copy of the License at\n      #\n      # http://www.apache.org/licenses/LICENSE-2.0\n      #\n      # Unless required by applicable law or agreed to in writing, software\n      # distributed under the License is distributed on an \"AS IS\" BASIS,\n      # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n      # See the License for the specific language governing permissions and\n      # limitations under the License.\n\n      app.log.dir=${launcher.dir}/../logs\n      app.log.file=${launcher.name}.lo
 g\n      app.audit.file=${launcher.name}-audit.log\n\n      log4j.rootLogger=ERROR, drfa\n\n      log4j.logger.org.apache.hadoop.gateway=INFO\n      #log4j.logger.org.apache.hadoop.gateway=DEBUG\n\n      #log4j.logger.org.eclipse.jetty=DEBUG\n      #log4j.logger.org.apache.shiro=DEBUG\n      #log4j.logger.org.apache.http=DEBUG\n      #log4j.logger.org.apache.http.client=DEBUG\n      #log4j.logger.org.apache.http.headers=DEBUG\n      #log4j.logger.org.apache.http.wire=DEBUG\n\n      log4j.appender.stdout=org.apache.log4j.ConsoleAppender\n      log4j.appender.stdout.layout=org.apache.log4j.PatternLayout\n      log4j.appender.stdout.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n      log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender\n      log4j.appender.drfa.File=${app.log.dir}/${app.log.file}\n      log4j.appender.drfa.DatePattern=.yyyy-MM-dd\n      log4j.appender.drfa.layout=org.apache.log4j.PatternLayout\n      log4j.appender.drfa.layout.ConversionPat
 tern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n      log4j.logger.audit=INFO, auditfile\n      log4j.appender.auditfile=org.apache.log4j.DailyRollingFileAppender\n      log4j.appender.auditfile.File=${app.log.dir}/${app.audit.file}\n      log4j.appender.auditfile.Append = true\n      log4j.appender.auditfile.DatePattern = '.'yyyy-MM-dd\n      log4j.appender.auditfile.layout = org.apache.hadoop.gateway.audit.log4j.layout.AuditLayout"
+        }, 
+        "ranger-hdfs-plugin-properties": {
+            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 
+            "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", 
+            "common.name.for.certificate": "-", 
+            "XAAUDIT.HDFS.IS_ENABLED": "false", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
+            "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
+            "XAAUDIT.DB.IS_ENABLED": "true", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", 
+            "hadoop.rpc.protection": "-", 
+            "ranger-hdfs-plugin-enabled": "No", 
+            "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
+            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
+            "policy_user": "ambari-qa", 
+            "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", 
+            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", 
+            "SSL_TRUSTSTORE_PASSWORD": "changeit", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", 
+            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", 
+            "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", 
+            "REPOSITORY_CONFIG_PASSWORD": "hadoop"
+        }, 
+        "hbase-policy": {
+            "security.masterregion.protocol.acl": "*", 
+            "security.admin.protocol.acl": "*", 
+            "security.client.protocol.acl": "*"
+        }, 
+        "kerberos-env": {
+            "kdc_host": "c6405.ambari.apache.org", 
+            "create_attributes_template": "\n{\n  \"objectClass\": [\"top\", \"person\", \"organizationalPerson\", \"user\"],\n  \"cn\": \"$principal_name\",\n  #if( $is_service )\n  \"servicePrincipalName\": \"$principal_name\",\n  #end\n  \"userPrincipalName\": \"$normalized_principal\",\n  \"unicodePwd\": \"$password\",\n  \"accountExpires\": \"0\",\n  \"userAccountControl\": \"66048\"\n}\n    ", 
+            "realm": "EXAMPLE.COM", 
+            "container_dn": "", 
+            "ldap_url": "", 
+            "encryption_types": "aes des3-cbc-sha1 rc4 des-cbc-md5", 
+            "admin_server_host": "c6405.ambari.apache.org", 
+            "executable_search_paths": "/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin", 
+            "kdc_type": "mit-kdc", 
+            "manage_identities": "true"
+        }, 
+        "storm-site": {
+            "topology.tuple.serializer": "backtype.storm.serialization.types.ListDelegateSerializer", 
+            "topology.workers": "1", 
+            "drpc.worker.threads": "64", 
+            "storm.messaging.netty.client_worker_threads": "1", 
+            "supervisor.heartbeat.frequency.secs": "5", 
+            "topology.executor.send.buffer.size": "1024", 
+            "drpc.childopts": "-Xmx768m _JAAS_PLACEHOLDER", 
+            "nimbus.thrift.port": "6627", 
+            "storm.zookeeper.retry.intervalceiling.millis": "30000", 
+            "storm.local.dir": "/hadoop/storm", 
+            "storm.zookeeper.superACL": "sasl:{{storm_bare_jaas_principal}}", 
+            "topology.receiver.buffer.size": "8", 
+            "storm.zookeeper.servers": "['c6405.ambari.apache.org']", 
+            "transactional.zookeeper.root": "/transactional", 
+            "drpc.authorizer": "backtype.storm.security.auth.authorizer.DRPCSimpleACLAuthorizer", 
+            "_storm.min.ruid": "null", 
+            "drpc.request.timeout.secs": "600", 
+            "topology.skip.missing.kryo.registrations": "false", 
+            "worker.heartbeat.frequency.secs": "1", 
+            "zmq.hwm": "0", 
+            "storm.zookeeper.connection.timeout": "15000", 
+            "_storm.thrift.secure.transport": "backtype.storm.security.auth.kerberos.KerberosSaslTransportPlugin", 
+            "storm.messaging.netty.server_worker_threads": "1", 
+            "supervisor.worker.start.timeout.secs": "120", 
+            "zmq.threads": "1", 
+            "topology.acker.executors": "null", 
+            "storm.local.mode.zmq": "false", 
+            "topology.max.task.parallelism": "null", 
+            "topology.max.error.report.per.interval": "5", 
+            "topology.debug": "false", 
+            "drpc.queue.size": "128", 
+            "storm.principal.tolocal": "backtype.storm.security.auth.KerberosPrincipalToLocal", 
+            "worker.childopts": "-Xmx768m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm-client/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-client/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Worker_%ID%_JVM", 
+            "java.security.auth.login.config": "{{conf_dir}}/storm_jaas.conf", 
+            "nimbus.childopts": "-Xmx1024m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm-nimbus/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8649,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-nimbus/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM", 
+            "storm.zookeeper.retry.times": "5", 
+            "nimbus.monitor.freq.secs": "10", 
+            "storm.cluster.mode": "distributed", 
+            "dev.zookeeper.path": "/tmp/dev-storm-zookeeper", 
+            "drpc.invocations.port": "3773", 
+            "_storm.thrift.nonsecure.transport": "backtype.storm.security.auth.SimpleTransportPlugin", 
+            "storm.zookeeper.root": "/storm", 
+            "logviewer.childopts": "-Xmx128m _JAAS_PLACEHOLDER", 
+            "transactional.zookeeper.port": "null", 
+            "topology.worker.childopts": "null", 
+            "topology.max.spout.pending": "null", 
+            "nimbus.cleanup.inbox.freq.secs": "600", 
+            "storm.messaging.netty.min_wait_ms": "100", 
+            "nimbus.task.timeout.secs": "30", 
+            "nimbus.thrift.max_buffer_size": "1048576", 
+            "topology.sleep.spout.wait.strategy.time.ms": "1", 
+            "topology.optimize": "true", 
+            "ui.filter.params": "{'type': 'kerberos', 'kerberos.principal': '{{storm_ui_jaas_principal}}', 'kerberos.keytab': '{{storm_ui_keytab_path}}', 'kerberos.name.rules': 'DEFAULT'}", 
+            "nimbus.reassign": "true", 
+            "storm.messaging.transport": "backtype.storm.messaging.netty.Context", 
+            "logviewer.appender.name": "A1", 
+            "nimbus.supervisor.users": "['{{storm_user}}']", 
+            "nimbus.host": "c6405.ambari.apache.org", 
+            "ui.port": "8744", 
+            "supervisor.slots.ports": "[6700, 6701]", 
+            "nimbus.file.copy.expiration.secs": "600", 
+            "supervisor.monitor.frequency.secs": "3", 
+            "ui.childopts": "-Xmx768m _JAAS_PLACEHOLDER", 
+            "transactional.zookeeper.servers": "null", 
+            "zmq.linger.millis": "5000", 
+            "topology.error.throttle.interval.secs": "10", 
+            "topology.worker.shared.thread.pool.size": "4", 
+            "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib:/usr/hdp/current/storm-client/lib", 
+            "topology.spout.wait.strategy": "backtype.storm.spout.SleepSpoutWaitStrategy", 
+            "task.heartbeat.frequency.secs": "3", 
+            "topology.transfer.buffer.size": "1024", 
+            "storm.zookeeper.session.timeout": "20000", 
+            "nimbus.admins": "['{{storm_user}}']", 
+            "topology.executor.receive.buffer.size": "1024", 
+            "topology.stats.sample.rate": "0.05", 
+            "topology.fall.back.on.java.serialization": "true", 
+            "supervisor.childopts": "-Xmx256m _JAAS_PLACEHOLDER -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port={{jmxremote_port}} -javaagent:/usr/hdp/current/storm-supervisor/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-supervisor/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Supervisor_JVM", 
+            "topology.enable.message.timeouts": "true", 
+            "storm.messaging.netty.max_wait_ms": "1000", 
+            "nimbus.topology.validator": "backtype.storm.nimbus.DefaultTopologyValidator", 
+            "nimbus.supervisor.timeout.secs": "60", 
+            "topology.disruptor.wait.strategy": "com.lmax.disruptor.BlockingWaitStrategy", 
+            "storm.messaging.netty.buffer_size": "5242880", 
+            "drpc.port": "3772", 
+            "topology.kryo.factory": "backtype.storm.serialization.DefaultKryoFactory", 
+            "storm.zookeeper.retry.interval": "1000", 
+            "nimbus.authorizer": "backtype.storm.security.auth.authorizer.SimpleACLAuthorizer", 
+            "storm.messaging.netty.max_retries": "30", 
+            "topology.tick.tuple.freq.secs": "null", 
+            "supervisor.enable": "true", 
+            "nimbus.task.launch.secs": "120", 
+            "task.refresh.poll.secs": "10", 
+            "topology.message.timeout.secs": "30", 
+            "nimbus.inbox.jar.expiration.secs": "3600", 
+            "topology.state.synchronization.timeout.secs": "60", 
+            "supervisor.worker.timeout.secs": "30", 
+            "ui.filter": "org.apache.hadoop.security.authentication.server.AuthenticationFilter", 
+            "topology.trident.batch.emit.interval.millis": "500", 
+            "topology.builtin.metrics.bucket.size.secs": "60", 
+            "storm.thrift.transport": "{{storm_thrift_transport}}", 
+            "logviewer.port": "8000", 
+            "storm.log.dir": "{{log_dir}}", 
+            "storm.zookeeper.port": "2181"
+        }, 
+        "hdfs-site": {
+            "dfs.namenode.checkpoint.period": "21600", 
+            "dfs.namenode.avoid.write.stale.datanode": "true", 
+            "dfs.permissions.superusergroup": "hdfs", 
+            "nfs.dump.dir": "/tmp/.hdfs-nfs", 
+            "dfs.namenode.startup.delay.block.deletion.sec": "3600", 
+            "dfs.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", 
+            "dfs.datanode.kerberos.principal": "dn/_HOST@EXAMPLE.COM", 
+            "dfs.heartbeat.interval": "3", 
+            "dfs.block.access.token.enable": "true", 
+            "dfs.support.append": "true", 
+            "dfs.datanode.address": "0.0.0.0:1019", 
+            "dfs.cluster.administrators": " hdfs", 
+            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
+            "dfs.namenode.safemode.threshold-pct": "1.0f", 
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
+            "dfs.namenode.rpc-address": "c6405.ambari.apache.org:8020", 
+            "dfs.permissions.enabled": "true", 
+            "dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
+            "dfs.client.read.shortcircuit": "true", 
+            "dfs.https.port": "50470", 
+            "dfs.namenode.https-address": "c6405.ambari.apache.org:50470", 
+            "dfs.blockreport.initialDelay": "120", 
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode", 
+            "dfs.blocksize": "134217728", 
+            "dfs.datanode.max.transfer.threads": "16384", 
+            "dfs.secondary.namenode.kerberos.internal.spnego.principal": "HTTP/_HOST@EXAMPLE.COM", 
+            "dfs.replication": "3", 
+            "dfs.namenode.handler.count": "25", 
+            "dfs.web.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab", 
+            "fs.permissions.umask-mode": "022", 
+            "dfs.namenode.stale.datanode.interval": "30000", 
+            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
+            "dfs.datanode.data.dir": "/hadoop/hdfs/data", 
+            "dfs.namenode.http-address": "c6405.ambari.apache.org:50070", 
+            "dfs.webhdfs.enabled": "true", 
+            "dfs.datanode.failed.volumes.tolerated": "0", 
+            "dfs.namenode.accesstime.precision": "0", 
+            "dfs.datanode.https.address": "0.0.0.0:50475", 
+            "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary", 
+            "dfs.namenode.secondary.http-address": "c6405.ambari.apache.org:50090", 
+            "nfs.exports.allowed.hosts": "* rw", 
+            "dfs.namenode.checkpoint.txns": "1000000", 
+            "dfs.datanode.http.address": "0.0.0.0:1022", 
+            "dfs.datanode.du.reserved": "1073741824", 
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
+            "dfs.secondary.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
+            "dfs.web.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM", 
+            "dfs.http.policy": "HTTP_ONLY", 
+            "dfs.secondary.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", 
+            "dfs.datanode.keytab.file": "/etc/security/keytabs/dn.service.keytab", 
+            "dfs.namenode.name.dir.restore": "true", 
+            "dfs.namenode.keytab.file": "/etc/security/keytabs/nn.service.keytab", 
+            "dfs.journalnode.https-address": "0.0.0.0:8481", 
+            "dfs.journalnode.http-address": "0.0.0.0:8480", 
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
+            "dfs.namenode.avoid.read.stale.datanode": "true", 
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
+            "dfs.datanode.data.dir.perm": "750", 
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
+            "dfs.replication.max": "50", 
+            "dfs.namenode.name.dir": "/hadoop/hdfs/namenode"
+        }, 
+        "storm-env": {
+            "jmxremote_port": "56431", 
+            "storm_ui_keytab": "/etc/security/keytabs/spnego.service.keytab", 
+            "storm_log_dir": "/var/log/storm", 
+            "storm_principal_name": "storm@EXAMPLE.COM", 
+            "storm_pid_dir": "/var/run/storm", 
+            "storm_ui_principal_name": "HTTP/_HOST@EXAMPLE.COM", 
+            "nimbus_principal_name": "nimbus/_HOST@EXAMPLE.COM", 
+            "nimbus_keytab": "/etc/security/keytabs/nimbus.service.keytab", 
+            "content": "\n#!/bin/bash\n\n# Set Storm specific environment variables here.\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}\n\n# export STORM_CONF_DIR=\"\"\nexport STORM_HOME=/usr/hdp/current/storm-client", 
+            "storm_keytab": "/etc/security/keytabs/storm.service.keytab", 
+            "storm_user": "storm"
+        }, 
+        "hbase-site": {
+            "hbase.regionserver.wal.codec": "org.apache.hadoop.hbase.regionserver.wal.WALCellCodec", 
+            "hbase.master.info.bindAddress": "0.0.0.0", 
+            "hbase.regionserver.port": "16020", 
+            "hbase.client.keyvalue.maxsize": "1048576", 
+            "hbase.regionserver.keytab.file": "/etc/security/keytabs/hbase.service.keytab", 
+            "hbase.hstore.compactionThreshold": "3", 
+            "hbase.hregion.majorcompaction.jitter": "0.50", 
+            "hbase.client.retries.number": "35", 
+            "hbase.bulkload.staging.dir": "/apps/hbase/staging", 
+            "hbase.rootdir": "hdfs://c6405.ambari.apache.org:8020/apps/hbase/data", 
+            "hbase.rpc.timeout": "60000", 
+            "hbase.regionserver.handler.count": "30", 
+            "hbase.hregion.majorcompaction": "604800000", 
+            "hbase.rpc.protection": "authentication", 
+            "hbase.bucketcache.size": "", 
+            "hbase.master.kerberos.principal": "hbase/_HOST@EXAMPLE.COM", 
+            "hbase.bucketcache.percentage.in.combinedcache": "", 
+            "hbase.hregion.memstore.flush.size": "134217728", 
+            "hbase.superuser": "hbase", 
+            "hbase.regionserver.global.memstore.lowerLimit": "0.38", 
+            "hbase.zookeeper.property.clientPort": "2181", 
+            "hbase.hregion.max.filesize": "1073741824", 
+            "hbase.regionserver.global.memstore.upperLimit": "0.4", 
+            "hbase.bucketcache.ioengine": "", 
+            "zookeeper.session.timeout": "90000", 
+            "hbase.regionserver.global.memstore.size": "${hbase.regionserver.global.memstore.upperLimit}", 
+            "hbase.tmp.dir": "/hadoop/hbase", 
+            "hfile.block.cache.size": "0.40", 
+            "hbase.regionserver.kerberos.principal": "hbase/_HOST@EXAMPLE.COM", 
+            "hbase.client.scanner.caching": "100", 
+            "hbase.security.authentication": "kerberos", 
+            "hbase.defaults.for.version.skip": "true", 
+            "hbase.master.info.port": "60010", 
+            "hbase.zookeeper.quorum": "c6405.ambari.apache.org", 
+            "hbase.regionserver.info.port": "16030", 
+            "zookeeper.znode.parent": "/hbase-secure", 
+            "hbase.zookeeper.useMulti": "true", 
+            "hbase.hstore.blockingStoreFiles": "10", 
+            "hbase.master.port": "16000", 
+            "hbase.security.authorization": "true", 
+            "hbase.master.keytab.file": "/etc/security/keytabs/hbase.service.keytab", 
+            "phoenix.query.timeoutMs": "60000", 
+            "hbase.local.dir": "${hbase.tmp.dir}/local", 
+            "hbase.cluster.distributed": "true", 
+            "hbase.hregion.memstore.mslab.enabled": "true", 
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
+            "hbase.coprocessor.region.classes": "org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint,org.apache.hadoop.hbase.security.access.AccessController", 
+            "hbase.coprocessor.master.classes": "org.apache.hadoop.hbase.security.access.AccessController", 
+            "hbase.hregion.memstore.block.multiplier": "4"
+        }, 
+        "knox-env": {
+            "knox_master_secret": "password", 
+            "knox_pid_dir": "/var/run/knox", 
+            "knox_keytab_path": "/etc/security/keytabs/knox.service.keytab", 
+            "knox_group": "knox", 
+            "knox_user": "knox", 
+            "knox_principal_name": "knox/_HOST@EXAMPLE.COM"
+        }, 
+        "hadoop-policy": {
+            "security.job.client.protocol.acl": "*", 
+            "security.job.task.protocol.acl": "*", 
+            "security.datanode.protocol.acl": "*", 
+            "security.namenode.protocol.acl": "*", 
+            "security.client.datanode.protocol.acl": "*", 
+            "security.inter.tracker.protocol.acl": "*", 
+            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop", 
+            "security.client.protocol.acl": "*", 
+            "security.refresh.policy.protocol.acl": "hadoop", 
+            "security.admin.operations.protocol.acl": "hadoop", 
+            "security.inter.datanode.protocol.acl": "*"
+        }, 
+        "hdfs-log4j": {
+            "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
 gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
 .console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
 er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
 false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# mapred audit logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.fi
 le}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\n
 log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateChange=WARN"
+        }, 
+        "ranger-hbase-plugin-properties": {
+            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 
+            "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", 
+            "common.name.for.certificate": "-", 
+            "XAAUDIT.HDFS.IS_ENABLED": "false", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
+            "ranger-hbase-plugin-enabled": "No", 
+            "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
+            "XAAUDIT.DB.IS_ENABLED": "true", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", 
+            "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
+            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
+            "policy_user": "ambari-qa", 
+            "UPDATE_XAPOLICIES_ON_GRANT_REVOKE": "true", 
+            "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", 
+            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", 
+            "SSL_TRUSTSTORE_PASSWORD": "changeit", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", 
+            "REPOSITORY_CONFIG_USERNAME": "hbase", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", 
+            "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", 
+            "REPOSITORY_CONFIG_PASSWORD": "hbase"
+        }, 
+        "krb5-conf": {
+            "domains": "EXAMPLE.COM", 
+            "manage_krb5_conf": "true", 
+            "content": "\n[libdefaults]\n  renew_lifetime = 7d\n  forwardable = true\n  default_realm = {{realm|upper()}}\n  ticket_lifetime = 24h\n  dns_lookup_realm = false\n  dns_lookup_kdc = false\n  #default_tgs_enctypes = {{encryption_types}}\n  #default_tkt_enctypes = {{encryption_types}}\n\n{% if domains %}\n[domain_realm]\n{% for domain in domains.split(',') %}\n  {{domain}} = {{realm|upper()}}\n{% endfor %}\n{% endif %}\n\n[logging]\n  default = FILE:/var/log/krb5kdc.log\n  admin_server = FILE:/var/log/kadmind.log\n  kdc = FILE:/var/log/krb5kdc.log\n\n[realms]\n  {{realm}} = {\n    admin_server = {{admin_server_host|default(kdc_host, True)}}\n    kdc = {{kdc_host}}\n  }\n\n{# Append additional realm declarations below #}\n    ", 
+            "conf_dir": "/etc"
+        }, 
+        "ldap-log4j": {
+            "content": "\n        # Licensed to the Apache Software Foundation (ASF) under one\n        # or more contributor license agreements.  See the NOTICE file\n        # distributed with this work for additional information\n        # regarding copyright ownership.  The ASF licenses this file\n        # to you under the Apache License, Version 2.0 (the\n        # \"License\"); you may not use this file except in compliance\n        # with the License.  You may obtain a copy of the License at\n        #\n        #     http://www.apache.org/licenses/LICENSE-2.0\n        #\n        # Unless required by applicable law or agreed to in writing, software\n        # distributed under the License is distributed on an \"AS IS\" BASIS,\n        # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n        # See the License for the specific language governing permissions and\n        # limitations under the License.\n\n        app.log.dir=${launcher.dir}/../logs\n 
        app.log.file=${launcher.name}.log\n\n        log4j.rootLogger=ERROR, drfa\n        log4j.logger.org.apache.directory.server.ldap.LdapServer=INFO\n        log4j.logger.org.apache.directory=WARN\n\n        log4j.appender.stdout=org.apache.log4j.ConsoleAppender\n        log4j.appender.stdout.layout=org.apache.log4j.PatternLayout\n        log4j.appender.stdout.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n        log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender\n        log4j.appender.drfa.File=${app.log.dir}/${app.log.file}\n        log4j.appender.drfa.DatePattern=.yyyy-MM-dd\n        log4j.appender.drfa.layout=org.apache.log4j.PatternLayout\n        log4j.appender.drfa.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n"
+        }, 
+        "core-site": {
+            "hadoop.http.authentication.cookie.domain": "", 
+            "proxyuser_group": "users", 
+            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py", 
+            "hadoop.proxyuser.hdfs.groups": "*", 
+            "fs.trash.interval": "360", 
+            "hadoop.http.authentication.signer.secret.provider.object": "", 
+            "ipc.server.tcpnodelay": "true", 
+            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec", 
+            "hadoop.proxyuser.knox.hosts": "c6405.ambari.apache.org", 
+            "ipc.client.idlethreshold": "8000", 
+            "hadoop.http.authentication.cookie.path": "", 
+            "hadoop.http.authentication.signer.secret.provider": "", 
+            "hadoop.http.authentication.signature.secret": "", 
+            "hadoop.rpc.protection": "authentication", 
+            "io.file.buffer.size": "131072", 
+            "hadoop.security.authentication": "kerberos", 
+            "hadoop.http.filter.initializers": "", 
+            "mapreduce.jobtracker.webinterface.trusted": "false", 
+            "hadoop.proxyuser.knox.groups": "users", 
+            "hadoop.proxyuser.hdfs.hosts": "*", 
+            "hadoop.proxyuser.HTTP.groups": "users", 
+            "fs.defaultFS": "hdfs://c6405.ambari.apache.org:8020", 
+            "ha.failover-controller.active-standby-elector.zk.op.retries": "120", 
+            "hadoop.http.authentication.signature.secret.file": "", 
+            "hadoop.http.authentication.token.validity": "", 
+            "hadoop.http.authentication.type": "simple", 
+            "hadoop.security.authorization": "true", 
+            "hadoop.http.authentication.simple.anonymous.allowed": "true", 
+            "ipc.client.connect.max.retries": "50", 
+            "hadoop.security.auth_to_local": "RULE:[1:$1@$0](ambari-qa@EXAMPLE.COM)s/.*/ambari-qa/\nRULE:[1:$1@$0](hbase@EXAMPLE.COM)s/.*/hbase/\nRULE:[1:$1@$0](hdfs@EXAMPLE.COM)s/.*/hdfs/\nRULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nRULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](hbase@EXAMPLE.COM)s/.*/hbase/\nRULE:[2:$1@$0](jn@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](knox@EXAMPLE.COM)s/.*/knox/\nRULE:[2:$1@$0](nfs@EXAMPLE.COM)s/.*/hdfs/\nRULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/\nDEFAULT", 
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
+            "hadoop.http.authentication.kerberos.name.rules": "", 
+            "ipc.client.connection.maxidletime": "30000"
+        }, 
+        "hadoop-env": {
+            "proxyuser_group": "users", 
+            "hdfs_log_dir_prefix": "/var/log/hadoop", 
+            "hdfs_user": "hdfs", 
+            "namenode_opt_maxnewsize": "256m", 
+            "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab", 
+            "namenode_opt_maxpermsize": "256m", 
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:M
 axPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DR
 FAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environme
 nt.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of
  hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-cl
 ient/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"", 
+            "namenode_heapsize": "1024m", 
+            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist", 
+            "namenode_opt_newsize": "256m", 
+            "nfsgateway_heapsize": "1024", 
+            "dtnode_heapsize": "1024m", 
+            "hadoop_root_logger": "INFO,RFA", 
+            "hadoop_heapsize": "1024", 
+            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
+            "namenode_opt_permsize": "128m", 
+            "hdfs_principal_name": "hdfs@EXAMPLE.COM"
+        }, 
+        "zookeeper-log4j": {
+            "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4
 j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLIN
 GFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n"
+        }, 
+        "topology": {
+            "content": "\n        <topology>\n\n            <gateway>\n\n                <provider>\n                    <role>authentication</role>\n                    <name>ShiroProvider</name>\n                    <enabled>true</enabled>\n                    <param>\n                        <name>sessionTimeout</name>\n                        <value>30</value>\n                    </param>\n                    <param>\n                        <name>main.ldapRealm</name>\n                        <value>org.apache.hadoop.gateway.shirorealm.KnoxLdapRealm</value>\n                    </param>\n                    <param>\n                        <name>main.ldapRealm.userDnTemplate</name>\n                        <value>uid={0},ou=people,dc=hadoop,dc=apache,dc=org</value>\n                    </param>\n                    <param>\n                        <name>main.ldapRealm.contextFactory.url</name>\n                        <value>ldap://{{knox_host_name}}:33389</value>\n           
          </param>\n                    <param>\n                        <name>main.ldapRealm.contextFactory.authenticationMechanism</name>\n                        <value>simple</value>\n                    </param>\n                    <param>\n                        <name>urls./**</name>\n                        <value>authcBasic</value>\n                    </param>\n                </provider>\n\n                <provider>\n                    <role>identity-assertion</role>\n                    <name>Default</name>\n                    <enabled>true</enabled>\n                </provider>\n\n                <provider>\n                    <role>authorization</role>\n                    <name>AclsAuthz</name>\n                    <enabled>true</enabled>\n                </provider>\n\n            </gateway>\n\n            <service>\n                <role>NAMENODE</role>\n                <url>hdfs://{{namenode_host}}:{{namenode_rpc_port}}</url>\n            </service>\n\n        
     <service>\n                <role>JOBTRACKER</role>\n                <url>rpc://{{rm_host}}:{{jt_rpc_port}}</url>\n            </service>\n\n            <service>\n                <role>WEBHDFS</role>\n                <url>http://{{namenode_host}}:{{namenode_http_port}}/webhdfs</url>\n            </service>\n\n            <service>\n                <role>WEBHCAT</role>\n                <url>http://{{webhcat_server_host}}:{{templeton_port}}/templeton</url>\n            </service>\n\n            <service>\n                <role>OOZIE</role>\n                <url>http://{{oozie_server_host}}:{{oozie_server_port}}/oozie</url>\n            </service>\n\n            <service>\n                <role>WEBHBASE</role>\n                <url>http://{{hbase_master_host}}:{{hbase_master_port}}</url>\n            </service>\n\n            <service>\n                <role>HIVE</role>\n                <url>http://{{hive_server_host}}:{{hive_http_port}}/{{hive_http_path}}</url>\n            </serv
 ice>\n\n            <service>\n                <role>RESOURCEMANAGER</role>\n                <url>http://{{rm_host}}:{{rm_port}}/ws</url>\n            </service>\n        </topology>"
+        }, 
+        "hbase-log4j": {
+            "content": "log4jproperties\nline2"
+        }, 
+        "oozie-site": {
+            "oozie.service.ProxyUserService.proxyuser.knox.hosts": "c6405.ambari.apache.org", 
+            "oozie.service.ProxyUserService.proxyuser.knox.groups": "users"
+        }, 
+        "gateway-site": {
+            "java.security.auth.login.config": "/etc/knox/conf/krb5JAASLogin.conf", 
+            "gateway.hadoop.kerberos.secured": "true", 
+            "gateway.gateway.conf.dir": "deployments", 
+            "gateway.path": "gateway", 
+            "sun.security.krb5.debug": "true", 
+            "gateway.port": "8443", 
+            "java.security.krb5.conf": "/etc/krb5.conf"
+        }, 
+        "hbase-env": {
+            "hbase_pid_dir": "/var/run/hbase", 
+            "hbase_regionserver_xmn_max": "512", 
+            "hbase_regionserver_xmn_ratio": "0.2", 
+            "hbase_user": "hbase", 
+            "hbase_master_heapsize": "1024m", 
+            "content": "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\n# export HBASE_HEAPSIZE=1000\n\n# Extra Java runtime options.\n# Below are what we set by default. May only work with SUN JVM.\n# For more on why as well as other possible settings,\n# see http://wiki.apache.org/hadoop/PerformanceTuning\nexport SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable 
 JMX exporting\n# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\n# If you want to configure BucketCache, specify '-XX: MaxDirectMemorySize=' with proper direct memory size\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# Extra ssh options. Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by default.\nexport HBASE_LOG_DIR={{l
 og_dir}}\n\n# A string representing this instance of hbase. $USER by default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The directory where pid files are stored. /tmp by default.\nexport HBASE_PID_DIR={{pid_dir}}\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}}\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}}\"\nexport HBASE_REGIONSERV
 ER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} {% if hbase_max_direct_memory_size %} -XX:MaxDirectMemorySize={{hbase_max_direct_memory_size}}m {% endif %} -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n{% else %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}}\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} {% if hbase_max_direct_memory_size %} -XX:MaxDirectMemorySize={{hbase_max_direct_memory_size}}m {% endif %}\"\n{% endif %}", 
+            "hbase_user_keytab": "/etc/security/keytabs/hbase.headless.keytab", 
+            "hbase_regionserver_heapsize": "1024m", 
+            "hbase_log_dir": "/var/log/hbase", 
+            "hbase_max_direct_memory_size": "", 
+            "hbase_principal_name": "hbase@EXAMPLE.COM"
+        }, 
+        "zookeeper-env": {
+            "zookeeper_keytab_path": "/etc/security/keytabs/zk.service.keytab", 
+            "zk_user": "zookeeper", 
+            "zk_log_dir": "/var/log/zookeeper", 
+            "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}", 
+            "zk_pid_dir": "/var/run/zookeeper", 
+            "zookeeper_principal_name": "zookeeper/_HOST@EXAMPLE.COM"
+        }, 
+        "zoo.cfg": {
+            "clientPort": "2181", 
+            "autopurge.purgeInterval": "24", 
+            "syncLimit": "5", 
+            "dataDir": "/hadoop/zookeeper", 
+            "initLimit": "10", 
+            "tickTime": "2000", 
+            "autopurge.snapRetainCount": "30"
+        }, 
+        "ranger-storm-plugin-properties": {
+            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 
+            "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", 
+            "common.name.for.certificate": "-", 
+            "XAAUDIT.HDFS.IS_ENABLED": "false", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
+            "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
+            "XAAUDIT.DB.IS_ENABLED": "true", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", 
+            "ranger-storm-plugin-enabled": "No", 
+            "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
+            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
+            "policy_user": "storm", 
+            "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", 
+            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", 
+            "SSL_TRUSTSTORE_PASSWORD": "changeit", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", 
+            "REPOSITORY_CONFIG_USERNAME": "stormtestuser@EXAMPLE.COM", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", 
+            "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", 
+            "REPOSITORY_CONFIG_PASSWORD": "stormtestuser"
+        }, 
+        "webhcat-site": {
+            "webhcat.proxyuser.knox.hosts": "c6405.ambari.apache.org", 
+            "webhcat.proxyuser.knox.groups": "users"
+        }, 
+        "users-ldif": {
+            "content": "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nversion: 1\n\n# Please replace with site specific values\ndn: dc=hadoop,dc=apache,dc=org\nobjectclass: organization\nobjectclass: dcObject\no: Hadoop\ndc: hadoop\n\n#
  Entry for a sample people container\n# Please replace with site specific values\ndn: ou=people,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass:organizationalUnit\nou: people\n\n# Entry for a sample end user\n# Please replace with site specific values\ndn: uid=guest,ou=people,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass:person\nobjectclass:organizationalPerson\nobjectclass:inetOrgPerson\ncn: Guest\nsn: User\nuid: guest\nuserPassword:guest-password\n\n# entry for sample user admin\ndn: uid=admin,ou=people,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass:person\nobjectclass:organizationalPerson\nobjectclass:inetOrgPerson\ncn: Admin\nsn: Admin\nuid: admin\nuserPassword:admin-password\n\n# entry for sample user sam\ndn: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass:person\nobjectclass:organizationalPerson\nobjectclass:inetOrgPerson\ncn: sam\nsn: sam\nuid: sam\nuserPassword:sam-password\n\n# entry for sample user tom\ndn: uid=tom,
 ou=people,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass:person\nobjectclass:organizationalPerson\nobjectclass:inetOrgPerson\ncn: tom\nsn: tom\nuid: tom\nuserPassword:tom-password\n\n# create FIRST Level groups branch\ndn: ou=groups,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass:organizationalUnit\nou: groups\ndescription: generic groups branch\n\n# create the analyst group under groups\ndn: cn=analyst,ou=groups,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass: groupofnames\ncn: analyst\ndescription:analyst  group\nmember: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org\nmember: uid=tom,ou=people,dc=hadoop,dc=apache,dc=org\n\n\n# create the scientist group under groups\ndn: cn=scientist,ou=groups,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass: groupofnames\ncn: scientist\ndescription: scientist group\nmember: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org"
+        }, 
+        "cluster-env": {
+            "security_enabled": "true", 
+            "hive_tar_source": "/usr/hdp/current/hive-client/hive.tar.gz", 
+            "hadoop-streaming_tar_destination_folder": "hdfs:///hdp/apps/{{ hdp_stack_version }}/mapreduce/", 
+            "pig_tar_source": "/usr/hdp/current/pig-client/pig.tar.gz", 
+            "hive_tar_destination_folder": "hdfs:///hdp/apps/{{ hdp_stack_version }}/hive/", 
+            "smokeuser_principal_name": "ambari-qa@EXAMPLE.COM", 
+            "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab", 
+            "hadoop-streaming_tar_source": "/usr/hdp/current/hadoop-mapreduce-client/hadoop-streaming.jar", 
+            "kerberos_domain": "EXAMPLE.COM", 
+            "tez_tar_destination_folder": "hdfs:///hdp/apps/{{ hdp_stack_version }}/tez/", 
+            "mapreduce_tar_destination_folder": "hdfs:///hdp/apps/{{ hdp_stack_version }}/mapreduce/", 
+            "tez_tar_source": "/usr/hdp/current/tez-client/lib/tez.tar.gz", 
+            "pig_tar_destination_folder": "hdfs:///hdp/apps/{{ hdp_stack_version }}/pig/", 
+            "user_group": "hadoop", 
+            "ignore_groupsusers_create": "false", 
+            "mapreduce_tar_source": "/usr/hdp/current/hadoop-client/mapreduce.tar.gz", 
+            "sqoop_tar_destination_folder": "hdfs:///hdp/apps/{{ hdp_stack_version }}/sqoop/", 
+            "smokeuser": "ambari-qa", 
+            "sqoop_tar_source": "/usr/hdp/current/sqoop-client/sqoop.tar.gz"
+        }
+    }, 
+    "configurationTags": {
+        "ranger-knox-plugin-properties": {
+            "tag": "version1429063570674"
+        }, 
+        "gateway-log4j": {
+            "tag": "version1429063570674"
+        }, 
+        "ranger-hdfs-plugin-properties": {
+            "tag": "version1"
+        }, 
+        "hbase-policy": {
+            "tag": "version1"
+        }, 
+        "kerberos-env": {
+            "tag": "version1429065728624"
+        }, 
+        "storm-site": {
+            "tag": "version1429066050500"
+        }, 
+        "hdfs-site": {
+            "tag": "version1429066050308"
+        }, 
+        "storm-env": {
+            "tag": "version1429066050460"
+        }, 
+        "hbase-site": {
+            "tag": "version1429066050414"
+        }, 
+        "knox-env": {
+            "tag": "version1429066050383"
+        }, 
+        "hadoop-policy": {
+            "tag": "version1"
+        }, 
+        "hdfs-log4j": {
+            "tag": "version1"
+        }, 
+        "ranger-hbase-plugin-properties": {
+            "tag": "version1"
+        }, 
+        "krb5-conf": {
+            "tag": "version1429065728624"
+        }, 
+        "ldap-log4j": {
+            "tag": "version1429063570674"
+        }, 
+        "core-site": {
+            "tag": "version1429066050580"
+        }, 
+        "hadoop-env": {
+            "tag": "version1429066050476"
+        }, 
+        "zookeeper-log4j": {
+            "tag": "version1"
+        }, 
+        "topology": {
+            "tag": "version1429063570674"
+        }, 
+        "hbase-log4j": {
+            "tag": "version1"
+        }, 
+        "oozie-site": {
+            "tag": "version1"
+        }, 
+        "gateway-site": {
+            "tag": "version1429066050442"
+        }, 
+        "hbase-env": {
+            "tag": "version1429066050343"
+        }, 
+        "zookeeper-env": {
+            "tag": "version1429066050522"
+        }, 
+        "zoo.cfg": {
+            "tag": "version1"
+        }, 
+        "ranger-storm-plugin-properties": {
+            "tag": "version1429063043219"
+        }, 
+        "webhcat-site": {
+            "tag": "version1"
+        }, 
+        "users-ldif": {
+            "tag": "version1429063570674"
+        }, 
+        "cluster-env": {
+            "tag": "version1429066050554"
+        }
+    }, 
+    "commandId": "17-0", 
+    "clusterHostInfo": {
+        "snamenode_host": [
+            "c6405.ambari.apache.org"
+        ], 
+        "drpc_server_hosts": [
+            "c6405.ambari.apache.org"
+        ], 
+        "nimbus_hosts": [
+            "c6405.ambari.apache.org"
+        ], 
+        "all_ping_ports": [
+            "8670"
+        ], 
+        "knox_gateway_hosts": [
+            "c6405.ambari.apache.org"
+        ], 
+        "all_hosts": [
+            "c6405.ambari.apache.org"
+        ], 
+        "hbase_rs_hosts": [
+            "c6405.ambari.apache.org"
+        ], 
+        "slave_hosts": [
+            "c6405.ambari.apache.org"
+        ], 
+        "namenode_host": [
+            "c6405.ambari.apache.org"
+        ], 
+        "hbase_master_hosts": [
+            "c6405.ambari.apache.org"
+        ], 
+        "storm_ui_server_hosts": [
+            "c6405.ambari.apache.org"
+        ], 
+        "all_racks": [
+            "/default-rack"
+        ], 
+        "all_ipv4_ips": [
+            "192.168.64.105"
+        ], 
+        "ambari_server_host": [
+            "c6405.ambari.apache.org"
+        ], 
+        "zookeeper_hosts": [
+            "c6405.ambari.apache.org"
+        ], 
+        "supervisor_hosts": [
+            "c6405.ambari.apache.org"
+        ]
+    }
+}

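For anyone skimming the fixture above: each of these JSON files is a snapshot of the command payload the Ambari agent hands to a stack script, so the Python stack tests can replay a full cluster configuration without a live cluster. Below is a minimal sketch of inspecting one of the fixtures with nothing but the standard library; the repository-relative path and the printed keys come straight from the fixtures in this commit, while the note that the same dictionary normally reaches scripts through Script.get_config() (and the tests through the RMFTestCase harness) is the usual resource_management convention, not something introduced by this patch.

    import json

    # Load one of the fixtures added by this commit; the path assumes the
    # working directory is the root of an ambari checkout.
    path = "ambari-server/src/test/python/stacks/2.3/configs/default.hbasedecom.json"
    with open(path) as fh:
        cmd = json.load(fh)

    # Top-level sections typically include commandParams, configurations,
    # configurationTags, clusterHostInfo and hostLevelParams.
    print(cmd["roleCommand"])                                        # SERVICE_CHECK
    print(cmd["commandParams"]["script"])                            # scripts/service_check.py
    print(cmd["configurations"]["cluster-env"]["security_enabled"])  # "false" in the decom fixture
    print(cmd["configurations"]["hbase-env"]["hbase_user"])          # service account read by params.py

The stack tests load these fixtures by file name through the RMFTestCase harness (executeScript(..., config_file="default.hbasedecom.json", ...)); the exact keyword arguments vary between Ambari versions, so treat the snippet above as a sketch for poking at the data rather than a drop-in test.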

[3/8] ambari git commit: AMBARI-9993. Add support for management of Phoenix Query Server to HDP Stack (Nick Dimiduk via alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/test/python/stacks/2.3/configs/default.hbasedecom.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/configs/default.hbasedecom.json b/ambari-server/src/test/python/stacks/2.3/configs/default.hbasedecom.json
new file mode 100644
index 0000000..e40c3c4
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.3/configs/default.hbasedecom.json
@@ -0,0 +1,713 @@
+{
+    "roleCommand": "SERVICE_CHECK", 
+    "clusterName": "c1", 
+    "hostname": "c6401.ambari.apache.org", 
+    "hostLevelParams": {
+        "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
+        "ambari_db_rca_password": "mapred", 
+        "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 
+        "jce_name": "UnlimitedJCEPolicyJDK7.zip", 
+        "stack_version": "2.0",
+        "stack_name": "HDP", 
+        "ambari_db_rca_driver": "org.postgresql.Driver", 
+        "jdk_name": "jdk-7u67-linux-x64.tar.gz",
+        "ambari_db_rca_username": "mapred", 
+        "java_home": "/usr/jdk64/jdk1.7.0_45",
+        "java_version": "8",
+        "db_name": "ambari"
+    }, 
+    "commandType": "EXECUTION_COMMAND", 
+    "roleParams": {}, 
+    "serviceName": "OOZIE", 
+    "role": "OOZIE_SERVICE_CHECK", 
+    "commandParams": {
+        "command_timeout": "300", 
+        "service_package_folder": "OOZIE",
+        "script_type": "PYTHON",
+        "script": "scripts/service_check.py",
+        "excluded_hosts": "host1",
+        "mark_draining_only": "true"
+    },
+    "taskId": 152, 
+    "public_hostname": "c6401.ambari.apache.org", 
+    "configurations": {
+        "mapred-site": {
+            "mapreduce.jobhistory.address": "c6402.ambari.apache.org:10020", 
+            "mapreduce.cluster.administrators": " hadoop", 
+            "mapreduce.reduce.input.buffer.percent": "0.0", 
+            "mapreduce.output.fileoutputformat.compress": "false", 
+            "mapreduce.framework.name": "yarn", 
+            "mapreduce.map.speculative": "false", 
+            "mapreduce.reduce.shuffle.merge.percent": "0.66", 
+            "yarn.app.mapreduce.am.resource.mb": "683", 
+            "mapreduce.map.java.opts": "-Xmx273m", 
+            "mapreduce.application.classpath": "$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*", 
+            "mapreduce.job.reduce.slowstart.completedmaps": "0.05", 
+            "mapreduce.output.fileoutputformat.compress.type": "BLOCK", 
+            "mapreduce.reduce.speculative": "false", 
+            "mapreduce.reduce.java.opts": "-Xmx546m", 
+            "mapreduce.am.max-attempts": "2", 
+            "yarn.app.mapreduce.am.admin-command-opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN", 
+            "mapreduce.reduce.log.level": "INFO", 
+            "mapreduce.map.sort.spill.percent": "0.7", 
+            "mapreduce.task.timeout": "300000", 
+            "mapreduce.map.memory.mb": "341", 
+            "mapreduce.task.io.sort.factor": "100", 
+            "mapreduce.jobhistory.intermediate-done-dir": "/mr-history/tmp", 
+            "mapreduce.reduce.memory.mb": "683", 
+            "yarn.app.mapreduce.am.log.level": "INFO", 
+            "mapreduce.map.log.level": "INFO", 
+            "mapreduce.shuffle.port": "13562", 
+            "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &amp;&gt; /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`", 
+            "mapreduce.map.output.compress": "false", 
+            "yarn.app.mapreduce.am.staging-dir": "/user", 
+            "mapreduce.reduce.shuffle.parallelcopies": "30", 
+            "mapreduce.reduce.shuffle.input.buffer.percent": "0.7", 
+            "mapreduce.jobhistory.webapp.address": "c6402.ambari.apache.org:19888", 
+            "mapreduce.jobhistory.done-dir": "/mr-history/done", 
+            "mapreduce.admin.reduce.child.java.opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN", 
+            "mapreduce.task.io.sort.mb": "136", 
+            "yarn.app.mapreduce.am.command-opts": "-Xmx546m", 
+            "mapreduce.admin.map.child.java.opts": "-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN"
+        }, 
+        "oozie-site": {
+            "oozie.service.PurgeService.purge.interval": "3600", 
+            "oozie.service.CallableQueueService.queue.size": "1000", 
+            "oozie.service.SchemaService.wf.ext.schemas": "shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,shell-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd,hive-action-0.3.xsd",
+            "oozie.service.JPAService.jdbc.url": "jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true", 
+            "oozie.service.HadoopAccessorService.nameNode.whitelist": " ", 
+            "use.system.libpath.for.mapreduce.and.pig.jobs": "false", 
+            "oozie.db.schema.name": "oozie", 
+            "oozie.credentials.credentialclasses": "hcat=org.apache.oozie.action.hadoop.HCatCredentials", 
+            "oozie.service.JPAService.create.db.schema": "false", 
+            "oozie.authentication.kerberos.name.rules": "\n        RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/\n        RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/\n        RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n        RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n        DEFAULT", 
+            "oozie.service.ActionService.executor.ext.classes": "\n            org.apache.oozie.action.email.EmailActionExecutor,\n            org.apache.oozie.action.hadoop.HiveActionExecutor,\n            org.apache.oozie.action.hadoop.ShellActionExecutor,\n            org.apache.oozie.action.hadoop.SqoopActionExecutor,\n            org.apache.oozie.action.hadoop.DistcpActionExecutor", 
+            "oozie.base.url": "http://c6402.ambari.apache.org:11000/oozie", 
+            "oozie.service.JPAService.jdbc.password": "asd", 
+            "oozie.service.coord.normal.default.timeout": "120", 
+            "oozie.service.AuthorizationService.security.enabled": "true", 
+            "oozie.service.JPAService.pool.max.active.conn": "10", 
+            "oozie.service.PurgeService.older.than": "30", 
+            "oozie.service.coord.push.check.requeue.interval": "30000", 
+            "oozie.service.HadoopAccessorService.hadoop.configurations": "*=/etc/hadoop/conf", 
+            "oozie.service.HadoopAccessorService.jobTracker.whitelist": " ", 
+            "oozie.service.CallableQueueService.callable.concurrency": "3", 
+            "oozie.service.JPAService.jdbc.username": "oozie", 
+            "oozie.service.CallableQueueService.threads": "10", 
+            "oozie.services.ext": "org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService", 
+            "oozie.systemmode": "NORMAL", 
+            "oozie.service.WorkflowAppService.system.libpath": "/user/${user.name}/share/lib", 
+            "oozie.services": "\n        org.apache.oozie.service.SchedulerService,\n        org.apache.oozie.service.InstrumentationService,\n        org.apache.oozie.service.CallableQueueService,\n        org.apache.oozie.service.UUIDService,\n        org.apache.oozie.service.ELService,\n        org.apache.oozie.service.AuthorizationService,\n        org.apache.oozie.service.UserGroupInformationService,\n        org.apache.oozie.service.HadoopAccessorService,\n        org.apache.oozie.service.URIHandlerService,\n        org.apache.oozie.service.MemoryLocksService,\n        org.apache.oozie.service.DagXLogInfoService,\n        org.apache.oozie.service.SchemaService,\n        org.apache.oozie.service.LiteWorkflowAppService,\n        org.apache.oozie.service.JPAService,\n        org.apache.oozie.service.StoreService,\n        org.apache.oozie.service.CoordinatorStoreService,\n        org.apache.oozie.service.SLAStoreService,\n        org.apache.oozie.service.DBLiteWorkflowStoreServic
 e,\n        org.apache.oozie.service.CallbackService,\n        org.apache.oozie.service.ActionService,\n        org.apache.oozie.service.ActionCheckerService,\n        org.apache.oozie.service.RecoveryService,\n        org.apache.oozie.service.PurgeService,\n        org.apache.oozie.service.CoordinatorEngineService,\n        org.apache.oozie.service.BundleEngineService,\n        org.apache.oozie.service.DagEngineService,\n        org.apache.oozie.service.CoordMaterializeTriggerService,\n        org.apache.oozie.service.StatusTransitService,\n        org.apache.oozie.service.PauseTransitService,\n        org.apache.oozie.service.GroupsService,\n        org.apache.oozie.service.ProxyUserService", 
+            "oozie.service.URIHandlerService.uri.handlers": "org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler", 
+            "oozie.authentication.type": "simple", 
+            "oozie.service.JPAService.jdbc.driver": "org.apache.derby.jdbc.EmbeddedDriver", 
+            "oozie.system.id": "oozie-${user.name}"
+        }, 
+        "storm-site": {
+            "topology.tuple.serializer": "backtype.storm.serialization.types.ListDelegateSerializer", 
+            "topology.workers": "1", 
+            "drpc.worker.threads": "64", 
+            "storm.zookeeper.servers": "['c6401.ambari.apache.org','c6402.ambari.apache.org']", 
+            "supervisor.heartbeat.frequency.secs": "5", 
+            "topology.executor.send.buffer.size": "1024", 
+            "drpc.childopts": "-Xmx768m", 
+            "nimbus.thrift.port": "6627", 
+            "storm.zookeeper.retry.intervalceiling.millis": "30000", 
+            "storm.local.dir": "/hadoop/storm", 
+            "topology.receiver.buffer.size": "8", 
+            "storm.messaging.netty.client_worker_threads": "1", 
+            "transactional.zookeeper.root": "/transactional", 
+            "drpc.request.timeout.secs": "600", 
+            "topology.skip.missing.kryo.registrations": "false", 
+            "worker.heartbeat.frequency.secs": "1", 
+            "zmq.hwm": "0", 
+            "storm.zookeeper.connection.timeout": "15000", 
+            "topology.max.error.report.per.interval": "5", 
+            "storm.messaging.netty.server_worker_threads": "1", 
+            "supervisor.worker.start.timeout.secs": "120", 
+            "zmq.threads": "1", 
+            "topology.acker.executors": "null", 
+            "storm.local.mode.zmq": "false", 
+            "topology.max.task.parallelism": "null", 
+            "storm.zookeeper.port": "2181", 
+            "nimbus.childopts": "-Xmx1024m", 
+            "worker.childopts": "-Xmx768m", 
+            "drpc.queue.size": "128", 
+            "storm.zookeeper.retry.times": "5", 
+            "nimbus.monitor.freq.secs": "10", 
+            "storm.cluster.mode": "distributed", 
+            "dev.zookeeper.path": "/tmp/dev-storm-zookeeper", 
+            "drpc.invocations.port": "3773", 
+            "storm.zookeeper.root": "/storm", 
+            "logviewer.childopts": "-Xmx128m", 
+            "transactional.zookeeper.port": "null", 
+            "topology.worker.childopts": "null", 
+            "topology.max.spout.pending": "null", 
+            "nimbus.cleanup.inbox.freq.secs": "600", 
+            "storm.messaging.netty.min_wait_ms": "100", 
+            "nimbus.task.timeout.secs": "30", 
+            "nimbus.thrift.max_buffer_size": "1048576", 
+            "topology.sleep.spout.wait.strategy.time.ms": "1", 
+            "topology.optimize": "true", 
+            "nimbus.reassign": "true", 
+            "storm.messaging.transport": "backtype.storm.messaging.netty.Context", 
+            "logviewer.appender.name": "A1", 
+            "nimbus.host": "c6401.ambari.apache.org", 
+            "ui.port": "8744", 
+            "supervisor.slots.ports": "[6700, 6701]", 
+            "nimbus.file.copy.expiration.secs": "600", 
+            "supervisor.monitor.frequency.secs": "3", 
+            "ui.childopts": "-Xmx768m", 
+            "transactional.zookeeper.servers": "null", 
+            "zmq.linger.millis": "5000", 
+            "topology.error.throttle.interval.secs": "10", 
+            "topology.worker.shared.thread.pool.size": "4", 
+            "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib", 
+            "topology.spout.wait.strategy": "backtype.storm.spout.SleepSpoutWaitStrategy", 
+            "task.heartbeat.frequency.secs": "3", 
+            "topology.transfer.buffer.size": "1024", 
+            "storm.zookeeper.session.timeout": "20000", 
+            "topology.executor.receive.buffer.size": "1024", 
+            "topology.stats.sample.rate": "0.05", 
+            "topology.fall.back.on.java.serialization": "true", 
+            "supervisor.childopts": "-Xmx256m", 
+            "topology.enable.message.timeouts": "true", 
+            "storm.messaging.netty.max_wait_ms": "1000", 
+            "nimbus.topology.validator": "backtype.storm.nimbus.DefaultTopologyValidator", 
+            "nimbus.supervisor.timeout.secs": "60", 
+            "topology.disruptor.wait.strategy": "com.lmax.disruptor.BlockingWaitStrategy", 
+            "nimbus.inbox.jar.expiration.secs": "3600", 
+            "drpc.port": "3772", 
+            "topology.kryo.factory": "backtype.storm.serialization.DefaultKryoFactory", 
+            "storm.zookeeper.retry.interval": "1000", 
+            "storm.messaging.netty.max_retries": "30", 
+            "topology.tick.tuple.freq.secs": "null", 
+            "supervisor.enable": "true", 
+            "nimbus.task.launch.secs": "120", 
+            "task.refresh.poll.secs": "10", 
+            "topology.message.timeout.secs": "30", 
+            "storm.messaging.netty.buffer_size": "5242880", 
+            "topology.state.synchronization.timeout.secs": "60", 
+            "supervisor.worker.timeout.secs": "30", 
+            "topology.trident.batch.emit.interval.millis": "500", 
+            "topology.builtin.metrics.bucket.size.secs": "60", 
+            "storm.thrift.transport": "backtype.storm.security.auth.SimpleTransportPlugin", 
+            "logviewer.port": "8000", 
+            "topology.debug": "false"
+        }, 
+        "webhcat-site": {
+            "templeton.pig.path": "pig.tar.gz/pig/bin/pig", 
+            "templeton.exec.timeout": "60000", 
+            "templeton.override.enabled": "false", 
+            "templeton.jar": "/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar", 
+            "templeton.zookeeper.hosts": "c6401.ambari.apache.org:2181,c6402.ambari.apache.org:2181", 
+            "templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=thrift://c6402.ambari.apache.org:9083,hive.metastore.sasl.enabled=yes,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse", 
+            "templeton.storage.class": "org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage", 
+            "templeton.hive.archive": "hdfs:///apps/webhcat/hive.tar.gz", 
+            "templeton.streaming.jar": "hdfs:///apps/webhcat/hadoop-streaming.jar", 
+            "templeton.port": "50111", 
+            "templeton.libjars": "/usr/lib/zookeeper/zookeeper.jar", 
+            "templeton.hadoop": "/usr/bin/hadoop", 
+            "templeton.hive.path": "hive.tar.gz/hive/bin/hive", 
+            "templeton.hadoop.conf.dir": "/etc/hadoop/conf", 
+            "templeton.hcat": "/usr/bin/hcat", 
+            "templeton.pig.archive": "hdfs:///apps/webhcat/pig.tar.gz"
+        }, 
+        "capacity-scheduler": {
+            "yarn.scheduler.capacity.node-locality-delay": "40", 
+            "yarn.scheduler.capacity.root.capacity": "100", 
+            "yarn.scheduler.capacity.root.acl_administer_queue": "*",
+            "yarn.scheduler.capacity.root.queues": "default", 
+            "yarn.scheduler.capacity.maximum-applications": "10000", 
+            "yarn.scheduler.capacity.root.default.user-limit-factor": "1",
+            "yarn.scheduler.capacity.root.default.maximum-capacity": "100", 
+            "yarn.scheduler.capacity.root.default.state": "RUNNING", 
+            "yarn.scheduler.capacity.maximum-am-resource-percent": "0.2", 
+            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "*", 
+            "yarn.scheduler.capacity.root.default.capacity": "100", 
+            "yarn.scheduler.capacity.root.default.acl_submit_applications": "*"
+        }, 
+        "hdfs-site": {
+            "dfs.namenode.checkpoint.period": "21600", 
+            "dfs.namenode.avoid.write.stale.datanode": "true", 
+            "dfs.block.access.token.enable": "true", 
+            "dfs.support.append": "true", 
+            "dfs.datanode.address": "0.0.0.0:${ambari.dfs.datanode.port}", 
+            "dfs.cluster.administrators": " hdfs", 
+            "dfs.replication": "3", 
+            "ambari.dfs.datanode.http.port": "50075", 
+            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
+            "dfs.namenode.safemode.threshold-pct": "1.0f", 
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
+            "dfs.permissions.enabled": "true", 
+            "fs.checkpoint.size": "67108864", 
+            "dfs.client.read.shortcircuit": "true", 
+            "dfs.namenode.https-address": "c6401.ambari.apache.org:50470", 
+            "dfs.journalnode.edits.dir": "/grid/0/hdfs/journal", 
+            "dfs.blocksize": "134217728", 
+            "dfs.datanode.max.transfer.threads": "1024", 
+            "dfs.datanode.du.reserved": "1073741824", 
+            "dfs.webhdfs.enabled": "true", 
+            "dfs.namenode.handler.count": "100", 
+            "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary", 
+            "fs.permissions.umask-mode": "022", 
+            "dfs.datanode.http.address": "0.0.0.0:50075",
+            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
+            "dfs.datanode.data.dir": "/hadoop/hdfs/data", 
+            "dfs.namenode.http-address": "c6401.ambari.apache.org:50070", 
+            "dfs.blockreport.initialDelay": "120", 
+            "dfs.datanode.failed.volumes.tolerated": "0", 
+            "dfs.namenode.accesstime.precision": "0", 
+            "ambari.dfs.datanode.port": "50010", 
+            "dfs.namenode.avoid.read.stale.datanode": "true", 
+            "dfs.namenode.secondary.http-address": "c6402.ambari.apache.org:50090", 
+            "dfs.namenode.stale.datanode.interval": "30000", 
+            "dfs.heartbeat.interval": "3", 
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
+            "dfs.permissions.superusergroup": "hdfs", 
+            "dfs.https.port": "50470", 
+            "dfs.journalnode.http-address": "0.0.0.0:8480", 
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
+            "dfs.datanode.data.dir.perm": "750", 
+            "dfs.namenode.name.dir.restore": "true", 
+            "dfs.replication.max": "50", 
+            "dfs.namenode.name.dir": "/hadoop/hdfs/namenode"
+        }, 
+        "hbase-site": {
+            "hbase.hstore.flush.retries.number": "120", 
+            "hbase.client.keyvalue.maxsize": "10485760", 
+            "hbase.hstore.compactionThreshold": "3", 
+            "hbase.rootdir": "hdfs://c6401.ambari.apache.org:8020/apps/hbase/data", 
+            "hbase.regionserver.handler.count": "60", 
+            "hbase.regionserver.global.memstore.lowerLimit": "0.38", 
+            "hbase.hregion.memstore.block.multiplier": "2", 
+            "hbase.hregion.memstore.flush.size": "134217728", 
+            "hbase.superuser": "hbase", 
+            "hbase.zookeeper.property.clientPort": "2181", 
+            "hbase.regionserver.global.memstore.upperLimit": "0.4", 
+            "zookeeper.session.timeout": "30000", 
+            "hbase.tmp.dir": "/hadoop/hbase", 
+            "hbase.local.dir": "${hbase.tmp.dir}/local", 
+            "hbase.hregion.max.filesize": "10737418240", 
+            "hfile.block.cache.size": "0.40", 
+            "hbase.security.authentication": "simple", 
+            "hbase.defaults.for.version.skip": "true", 
+            "hbase.zookeeper.quorum": "c6401.ambari.apache.org,c6402.ambari.apache.org", 
+            "zookeeper.znode.parent": "/hbase-unsecure", 
+            "hbase.hstore.blockingStoreFiles": "10",
+            "hbase.master.port": "60000",
+            "hbase.hregion.majorcompaction": "86400000", 
+            "hbase.security.authorization": "false", 
+            "hbase.cluster.distributed": "true", 
+            "hbase.hregion.memstore.mslab.enabled": "true", 
+            "hbase.client.scanner.caching": "100", 
+            "hbase.zookeeper.useMulti": "true"
+        }, 
+        "core-site": {
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
+            "gluster.daemon.user": "null", 
+            "hadoop.proxyuser.oozie.groups": "users", 
+            "hadoop.proxyuser.hcat.hosts": "c6402.ambari.apache.org", 
+            "hadoop.proxyuser.hive.groups": "users", 
+            "hadoop.security.authentication": "simple", 
+            "hadoop.proxyuser.oozie.hosts": "c6402.ambari.apache.org", 
+            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec", 
+            "mapreduce.jobtracker.webinterface.trusted": "false", 
+            "fs.AbstractFileSystem.glusterfs.impl": "null", 
+            "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020", 
+            "fs.trash.interval": "360", 
+            "ipc.client.idlethreshold": "8000", 
+            "io.file.buffer.size": "131072", 
+            "hadoop.security.authorization": "false", 
+            "hadoop.proxyuser.hive.hosts": "c6402.ambari.apache.org", 
+            "hadoop.security.auth_to_local": "\n        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n        RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n        RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n        RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n        DEFAULT", 
+            "hadoop.proxyuser.hcat.groups": "users", 
+            "ipc.client.connection.maxidletime": "30000", 
+            "ipc.client.connect.max.retries": "50"
+        }, 
+        "hive-site": {
+            "hive.enforce.sorting": "true", 
+            "javax.jdo.option.ConnectionPassword": "asd", 
+            "javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver", 
+            "hive.optimize.bucketmapjoin.sortedmerge": "true", 
+            "hive.security.metastore.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider", 
+            "fs.file.impl.disable.cache": "true", 
+            "hive.auto.convert.join.noconditionaltask": "true", 
+            "hive.map.aggr": "true", 
+            "hive.optimize.index.filter": "true", 
+            "hive.security.authorization.enabled": "false", 
+            "hive.optimize.reducededuplication.min.reducer": "1", 
+            "hive.optimize.bucketmapjoin": "true", 
+            "hive.metastore.uris": "thrift://c6402.ambari.apache.org:9083", 
+            "hive.mapjoin.bucket.cache.size": "10000", 
+            "hive.auto.convert.join.noconditionaltask.size": "1000000000", 
+            "hive.vectorized.execution.enabled": "false", 
+            "javax.jdo.option.ConnectionUserName": "hive", 
+            "hive.metastore.cache.pinobjtypes": "Table,Database,Type,FieldSchema,Order", 
+            "hive.optimize.reducededuplication": "true", 
+            "hive.metastore.warehouse.dir": "/apps/hive/warehouse", 
+            "hive.metastore.client.socket.timeout": "60",
+            "hive.auto.convert.join": "true", 
+            "hive.enforce.bucketing": "true", 
+            "hive.mapred.reduce.tasks.speculative.execution": "false", 
+            "hive.security.authenticator.manager": "org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator", 
+            "javax.jdo.option.ConnectionURL": "jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true", 
+            "hive.auto.convert.sortmerge.join": "true", 
+            "fs.hdfs.impl.disable.cache": "true", 
+            "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider", 
+            "ambari.hive.db.schema.name": "hive", 
+            "hive.metastore.execute.setugi": "true", 
+            "hive.auto.convert.sortmerge.join.noconditionaltask": "true", 
+            "hive.server2.enable.doAs": "true", 
+            "hive.optimize.mapjoin.mapreduce": "true"
+        }, 
+        "yarn-site": {
+            "yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25", 
+            "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor", 
+            "yarn.nodemanager.local-dirs": "/hadoop/yarn/local", 
+            "yarn.resourcemanager.resource-tracker.address": "c6402.ambari.apache.org:8025", 
+            "yarn.nodemanager.remote-app-log-dir-suffix": "logs", 
+            "yarn.resourcemanager.hostname": "c6402.ambari.apache.org", 
+            "yarn.nodemanager.health-checker.script.timeout-ms": "60000", 
+            "yarn.resourcemanager.scheduler.class": "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler", 
+            "yarn.nodemanager.resource.memory-mb": "2048", 
+            "yarn.scheduler.minimum-allocation-mb": "683", 
+            "yarn.resourcemanager.address": "c6402.ambari.apache.org:8050", 
+            "yarn.resourcemanager.scheduler.address": "c6402.ambari.apache.org:8030", 
+            "yarn.log-aggregation.retain-seconds": "2592000", 
+            "yarn.scheduler.maximum-allocation-mb": "2048", 
+            "yarn.log-aggregation-enable": "true", 
+            "yarn.nodemanager.address": "0.0.0.0:45454", 
+            "yarn.nodemanager.container-monitor.interval-ms": "3000", 
+            "yarn.nodemanager.log-aggregation.compression-type": "gz", 
+            "yarn.nodemanager.log.retain-second": "604800", 
+            "yarn.nodemanager.delete.debug-delay-sec": "0", 
+            "yarn.nodemanager.log-dirs": "/hadoop/yarn/log", 
+            "yarn.nodemanager.health-checker.interval-ms": "135000", 
+            "yarn.resourcemanager.am.max-attempts": "2", 
+            "yarn.nodemanager.remote-app-log-dir": "/app-logs", 
+            "yarn.nodemanager.admin-env": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX", 
+            "yarn.nodemanager.aux-services": "mapreduce_shuffle", 
+            "yarn.nodemanager.vmem-check-enabled": "false", 
+            "yarn.nodemanager.vmem-pmem-ratio": "2.1", 
+            "yarn.admin.acl": "*", 
+            "yarn.resourcemanager.webapp.address": "c6402.ambari.apache.org:8088", 
+            "yarn.resourcemanager.nodes.exclude-path": "/etc/hadoop/conf/yarn.exclude", 
+            "yarn.nodemanager.linux-container-executor.group": "hadoop", 
+            "yarn.acl.enable": "true", 
+            "yarn.log.server.url": "http://c6402.ambari.apache.org:19888/jobhistory/logs", 
+            "yarn.application.classpath": "/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*", 
+            "yarn.resourcemanager.admin.address": "c6402.ambari.apache.org:8141", 
+            "yarn.nodemanager.aux-services.mapreduce_shuffle.class": "org.apache.hadoop.mapred.ShuffleHandler"
+        },
+        "tez-site": {
+            "tez.am.log.level": "WARN",
+            "tez.lib.uris": "file:///usr/lib/tez/,file:///usr/lib/tez/lib/",
+            "tez.staging-dir": "/tmp/${user.name}/staging",
+            "tez.am.am-rm.heartbeat.interval-ms.max": "250"
+        },
+                "tez-site": {
+            "tez.am.log.level": "WARN",
+            "tez.lib.uris": "hdfs:///apps/tez/,hdfs:///apps/tez/lib/",
+            "tez.staging-dir": "/tmp/${user.name}/staging",
+            "tez.am.am-rm.heartbeat.interval-ms.max": "250"
+        },
+        "yarn-env": {
+            "yarn_pid_dir_prefix": "/var/run/hadoop-yarn", 
+            "apptimelineserver_heapsize": "1024", 
+            "nodemanager_heapsize": "1024", 
+            "content": "\nexport HADOOP_YARN_HOME={{hadoop_yarn_home}}\nexport YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER\nexport YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\nexport JAVA_HOME={{java64_home}}\n\n# User for YARN daemons\nexport HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}\n\n# resolve links - $0 may be a softlink\nexport YARN_CONF_DIR=\"${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}\"\n\n# some Java parameters\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\nif [ \"$JAVA_HOME\" != \"\" ]; then\n  #echo \"run java in $JAVA_HOME\"\n  JAVA_HOME=$JAVA_HOME\nfi\n\nif [ \"$JAVA_HOME\" = \"\" ]; then\n  echo \"Error: JAVA_HOME is not set.\"\n  exit 1\nfi\n\nJAVA=$JAVA_HOME/bin/java\nJAVA_HEAP_MAX=-Xmx1000m\n\n# For setting YARN specific HEAP sizes please use this\n# Parameter and set appropriately\nYARN_HEAPSIZE={{yarn_heapsize}}\n\n# check envvars which might override default args\nif [ \"$YARN_HEAPSIZE\" != \"\" ]; then\n  JAVA_HEAP_M
 AX=\"-Xmx\"\"$YARN_HEAPSIZE\"\"m\"\nfi\n\n# Resource Manager specific parameters\n\n# Specify the max Heapsize for the ResourceManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_RESOURCEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}\n\n# Specify the JVM options to be used when starting the ResourceManager.\n# These options will be appended to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_RESOURCEMANAGER_OPTS=\n\n# Node Manager specific parameters\n\n# Specify the max Heapsize for the NodeManager using a numerical value\n# in the scale of MB. For example, to 
 specify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_NODEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}\n\n# Specify the max Heapsize for the HistoryManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1024.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_HISTORYSERVER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}\n\n# Specify the JVM options to be used when starting the NodeManager.\n# These options will be
  appended to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_NODEMANAGER_OPTS=\n\n# so that filenames w/ spaces are handled correctly in loops below\nIFS=\n\n\n# default log directory and file\nif [ \"$YARN_LOG_DIR\" = \"\" ]; then\n  YARN_LOG_DIR=\"$HADOOP_YARN_HOME/logs\"\nfi\nif [ \"$YARN_LOGFILE\" = \"\" ]; then\n  YARN_LOGFILE='yarn.log'\nfi\n\n# default policy file for service-level authorization\nif [ \"$YARN_POLICYFILE\" = \"\" ]; then\n  YARN_POLICYFILE=\"hadoop-policy.xml\"\nfi\n\n# restore ordinary behaviour\nunset IFS\n\n\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.root.logger=$
 {YARN_ROOT_LOGGER:-INFO,console}\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}\"\nif [ \"x$JAVA_LIBRARY_PATH\" != \"x\" ]; then\n  YARN_OPTS=\"$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH\"\nfi\nYARN_OPTS=\"$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE\"", 
+            "yarn_heapsize": "1024", 
+            "yarn_user": "yarn", 
+            "resourcemanager_heapsize": "1024", 
+            "yarn_log_dir_prefix": "/var/log/hadoop-yarn",
+            "min_user_id": "1000"
+        },
+        "cluster-env": {
+            "security_enabled": "false",
+            "ignore_groupsusers_create": "false",
+            "smokeuser": "ambari-qa",
+            "kerberos_domain": "EXAMPLE.COM",
+            "user_group": "hadoop"
+        },
+        "hadoop-env": {
+            "namenode_opt_maxnewsize": "200m",
+            "hdfs_log_dir_prefix": "/var/log/hadoop",
+            "namenode_heapsize": "1024m", 
+            "namenode_opt_newsize": "200m",
+            "namenode_opt_permsize" : "128m",
+            "namenode_opt_maxpermsize" : "256m",
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop Configuration Directory\n#TODO: if env var set that can cause problems\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by defaul
 t.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}
 }/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nHADOOP_DATANODE_OPTS=\"-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/
 $USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HAD
 OOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n#
  The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ -d \"/usr/lib/tez\" ]; then\n  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64", 
+            "hdfs_user": "hdfs",
+            "dtnode_heapsize": "1024m", 
+            "proxyuser_group": "users",
+            "hadoop_heapsize": "1024", 
+            "hadoop_pid_dir_prefix": "/var/run/hadoop"
+        },
+        "hive-env": {
+            "hcat_pid_dir": "/var/run/webhcat", 
+            "hcat_user": "hcat", 
+            "hive_ambari_database": "MySQL", 
+            "hive_hostname": "abtest-3.c.pramod-thangali.internal", 
+            "hive_metastore_port": "9083", 
+            "webhcat_user": "hcat", 
+            "content": "\n if [ \"$SERVICE\" = \"cli\" ]; then\n   if [ -z \"$DEBUG\" ]; then\n     export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit\"\n   else\n     export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit\"\n   fi\n fi\n\n# The heap size of the jvm stared by hive shell script can be controlled via:\n\nexport HADOOP_HEAPSIZE=\"{{hive_heapsize}}\"\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n\n# Larger heap size may be required when running queries over large number of files or partitions.\n# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be\n# appropriate for hive server (hwi etc).\n\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hive Configuration Director
 y can be controlled by:\nexport HIVE_CONF_DIR={{conf_dir}}\n\n# Folder containing extra ibraries required for hive compilation/execution can be controlled by:\nif [ \"${HIVE_AUX_JARS_PATH}\" != \"\" ]; then\n  export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\nelif [ -d \"/usr/lib/hive-hcatalog/\" ]; then\n  export HIVE_AUX_JARS_PATH=/usr/lib/hive-hcatalog/share/hcatalog/hive-hcatalog-core-*.jar\nelse\n  export HIVE_AUX_JARS_PATH=/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar\nfi\nexport METASTORE_PORT={{hive_metastore_port}}", 
+            "hive_database_name": "hive", 
+            "hive_database_type": "mysql", 
+            "hive_pid_dir": "/var/run/hive", 
+            "hive_log_dir": "/var/log/hive", 
+            "hive_user": "hive", 
+            "hcat_log_dir": "/var/log/webhcat", 
+            "hive_database": "New MySQL Database"
+        },
+        "hbase-env": {
+            "hbase_pid_dir": "/var/run/hbase", 
+            "hbase_user": "hbase", 
+            "hbase_master_heapsize": "1024m", 
+            "content": "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n# The maximum amount of heap to use, in MB. Default is 1000.\n# export HBASE_HEAPSIZE=1000\n\n# Extra Java runtime options.\n# Below are what we set by default. May only work with SUN JVM.\n# For more on why as well as other possible settings,\n# see http://wiki.apache.org/hadoop/PerformanceTuning\nexport HBASE_OPTS=\"-XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log\"\nexport SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintG
 CDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable JMX exporting\n# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\nexport HBASE_MASTER_OPTS=\"-Xmx{{master_heapsize}}\"\nexport HBASE_REGIONSERVER_OPTS=\"-Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# 
 Extra ssh options. Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by default.\nexport HBASE_LOG_DIR={{log_dir}}\n\n# A string representing this instance of hbase. $USER by default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The directory where pid files are stored. /tmp by default.\nexport HBASE_PID_DIR={{pid_dir}}\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}}\"\nexport HBASE_MASTER_OPTS=\
 "$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}}\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n{% endif %}", 
+            "hbase_regionserver_heapsize": "1024m",
+            "hbase_regionserver_xmn_max": "512",
+            "hbase_regionserver_xmn_ratio": "0.2",
+            "hbase_log_dir": "/var/log/hbase"
+        },
+        "ganglia-env": {
+            "gmond_user": "nobody", 
+            "ganglia_runtime_dir": "/var/run/ganglia/hdp", 
+            "rrdcached_base_dir": "/var/lib/ganglia/rrds", 
+            "rrdcached_flush_timeout": "7200", 
+            "gmetad_user": "nobody", 
+            "rrdcached_write_threads": "4", 
+            "rrdcached_delay": "1800", 
+            "rrdcached_timeout": "3600"
+        },
+        "zookeeper-env": {
+            "clientPort": "2181", 
+            "zk_user": "zookeeper", 
+            "zk_log_dir": "/var/log/zookeeper", 
+            "syncLimit": "5", 
+            "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}", 
+            "initLimit": "10", 
+            "zk_pid_dir": "/var/run/zookeeper", 
+            "zk_data_dir": "/hadoop/zookeeper", 
+            "tickTime": "2000"
+        },
+        "mapred-env": {
+            "content": "\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\n\nexport HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}\n\nexport HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA\n\n#export HADOOP_JOB_HISTORYSERVER_OPTS=\n#export HADOOP_MAPRED_LOG_DIR=\"\" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.\n#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.\n#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.\n#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default\n#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.", 
+            "mapred_pid_dir_prefix": "/var/run/hadoop-mapreduce", 
+            "mapred_user": "mapred", 
+            "jobhistory_heapsize": "900", 
+            "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce"
+        }, 
+        "tez-env": {
+            "content": "\n# Tez specific configuration\nexport TEZ_CONF_DIR={{config_dir}}\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}", 
+            "tez_user": "tez"
+        }, 
+        "storm-env": {
+            "content": "\n#!/bin/bash\n\n# Set Storm specific environment variables here.\n\n# The java implementation to use.\nexport JAVA_HOME={{java_home}}\n\n# export STORM_CONF_DIR=\"\"", 
+            "storm_log_dir": "/var/log/storm", 
+            "storm_pid_dir": "/var/run/storm", 
+            "storm_user": "storm"
+        }, 
+        "falcon-env": {
+            "falcon_port": "15000", 
+            "falcon_pid_dir": "/var/run/falcon", 
+            "falcon_log_dir": "/var/log/falcon", 
+            "falcon.emeddedmq.port": "61616", 
+            "falcon_user": "falcon", 
+            "falcon_local_dir": "/hadoop/falcon", 
+            "content": "\n# The java implementation to use. If JAVA_HOME is not found we expect java and jar to be in path\nexport JAVA_HOME={{java_home}}\n\n# any additional java opts you want to set. This will apply to both client and server operations\n#export FALCON_OPTS=\n\n# any additional java opts that you want to set for client only\n#export FALCON_CLIENT_OPTS=\n\n# java heap size we want to set for the client. Default is 1024MB\n#export FALCON_CLIENT_HEAP=\n\n# any additional opts you want to set for prisim service.\n#export FALCON_PRISM_OPTS=\n\n# java heap size we want to set for the prisim service. Default is 1024MB\n#export FALCON_PRISM_HEAP=\n\n# any additional opts you want to set for falcon service.\nexport FALCON_SERVER_OPTS=\"-Dfalcon.embeddedmq={{falcon_embeddedmq_enabled}} -Dfalcon.emeddedmq.port={{falcon_emeddedmq_port}}\"\n\n# java heap size we want to set for the falcon server. Default is 1024MB\n#export FALCON_SERVER_HEAP=\n\n# What is is considered as falco
 n home dir. Default is the base locaion of the installed software\n#export FALCON_HOME_DIR=\n\n# Where log files are stored. Defatult is logs directory under the base install location\nexport FALCON_LOG_DIR={{falcon_log_dir}}\n\n# Where pid files are stored. Defatult is logs directory under the base install location\nexport FALCON_PID_DIR={{falcon_pid_dir}}\n\n# where the falcon active mq data is stored. Defatult is logs/data directory under the base install location\nexport FALCON_DATA_DIR={{falcon_embeddedmq_data}}\n\n# Where do you want to expand the war file. By Default it is in /server/webapp dir under the base install dir.\n#export FALCON_EXPANDED_WEBAPP_DIR=", 
+            "falcon.embeddedmq.data": "/hadoop/falcon/embeddedmq/data", 
+            "falcon.embeddedmq": "true", 
+            "falcon_store_uri": "file:///hadoop/falcon/store"
+        }, 
+        "oozie-env": {
+            "oozie_derby_database": "Derby", 
+            "oozie_admin_port": "11001", 
+            "oozie_hostname": "abtest-3.c.pramod-thangali.internal", 
+            "oozie_pid_dir": "/var/run/oozie", 
+            "content": "\n#!/bin/bash\n\nif [ -d \"/usr/lib/bigtop-tomcat\" ]; then\n  export OOZIE_CONFIG=${OOZIE_CONFIG:-/etc/oozie/conf}\n  export CATALINA_BASE=${CATALINA_BASE:-/var/lib/oozie/oozie-server}\n  export CATALINA_TMPDIR=${CATALINA_TMPDIR:-/var/tmp/oozie}\n  export OOZIE_CATALINA_HOME=/usr/lib/bigtop-tomcat\nfi\n\n#Set JAVA HOME\nexport JAVA_HOME={{java_home}}\n\nexport JRE_HOME=${JAVA_HOME}\n\n# Set Oozie specific environment variables here.\n\n# Settings for the Embedded Tomcat that runs Oozie\n# Java System properties for Oozie should be specified in this variable\n#\n# export CATALINA_OPTS=\n\n# Oozie configuration file to load from Oozie configuration directory\n#\n# export OOZIE_CONFIG_FILE=oozie-site.xml\n\n# Oozie logs directory\n#\nexport OOZIE_LOG={{oozie_log_dir}}\n\n# Oozie pid directory\n#\nexport CATALINA_PID={{pid_file}}\n\n#Location of the data for oozie\nexport OOZIE_DATA={{oozie_data_dir}}\n\n# Oozie Log4J configuration file to load from Oozie config
 uration directory\n#\n# export OOZIE_LOG4J_FILE=oozie-log4j.properties\n\n# Reload interval of the Log4J configuration file, in seconds\n#\n# export OOZIE_LOG4J_RELOAD=10\n\n# The port Oozie server runs\n#\nexport OOZIE_HTTP_PORT={{oozie_server_port}}\n\n# The admin port Oozie server runs\n#\nexport OOZIE_ADMIN_PORT={{oozie_server_admin_port}}\n\n# The host name Oozie server runs on\n#\n# export OOZIE_HTTP_HOSTNAME=`hostname -f`\n\n# The base URL for callback URLs to Oozie\n#\n# export OOZIE_BASE_URL=\"http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie\"\nexport JAVA_LIBRARY_PATH=/usr/lib/hadoop/lib/native/Linux-amd64-64", 
+            "oozie_user": "oozie", 
+            "oozie_database": "New Derby Database", 
+            "oozie_data_dir": "/hadoop/oozie/data", 
+            "oozie_log_dir": "/var/log/oozie"
+        }, 
+        "webhcat-env": {
+            "content": "\n# The file containing the running pid\nPID_FILE={{pid_file}}\n\nTEMPLETON_LOG_DIR={{templeton_log_dir}}/\n\n\nWEBHCAT_LOG_DIR={{templeton_log_dir}}/\n\n# The console error log\nERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log\n\n# The console log\nCONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log\n\n#TEMPLETON_JAR=templeton_jar_name\n\n#HADOOP_PREFIX=hadoop_prefix\n\n#HCAT_PREFIX=hive_prefix\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nexport HADOOP_HOME=/usr/lib/hadoop"
+        }, 
+        "pig-env": {
+            "content": "\nJAVA_HOME={{java64_home}}\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\nif [ -d \"/usr/lib/tez\" ]; then\n  PIG_OPTS=\"$PIG_OPTS -Dmapreduce.framework.name=yarn\"\nfi"
+        }, 
+        "sqoop-env": {
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n#Set path to where bin/hadoop is available\n#Set path to where bin/hadoop is available\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n#set the path to where bin/hbase is available\nexport HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}\n\n#Set the path to where bin/hive is available\nexport HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}\n\n#Set the path for where zookeper config dir is\nexport ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}\n\n# add libthrift in hive to sqoop class path first so hive imports work\nexport SQOOP_USER_CLASSPATH=\"`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}\""
+        }
+    },
+    "configuration_attributes": {
+      "mapred-site": {
+        "final": {
+          "mapred.healthChecker.script.path": "true",
+          "mapreduce.jobtracker.staging.root.dir": "true"
+        }
+      },
+      "oozie-site": {
+        "final": {
+          "oozie.service.PurgeService.purge.interval": "true",
+          "oozie.service.CallableQueueService.queue.size": "true"
+        }
+      },
+      "webhcat-site": {
+        "final": {
+          "templeton.pig.path": "true",
+          "templeton.exec.timeout": "true",
+          "templeton.override.enabled": "true"
+        }
+      },
+      "hdfs-site": {
+        "final": {
+          "dfs.web.ugi": "true",
+          "dfs.support.append": "true",
+          "dfs.cluster.administrators": "true"
+        }
+      },
+      "hbase-site": {
+        "final": {
+          "hbase.client.keyvalue.maxsize": "true",
+          "hbase.hstore.compactionThreshold": "true",
+          "hbase.rootdir": "true"
+        }
+      },
+      "core-site": {
+        "final": {
+          "hadoop.proxyuser.hive.groups": "true",
+          "webinterface.private.actions": "true",
+          "hadoop.proxyuser.oozie.hosts": "true"
+        }
+      },
+      "hive-site": {
+        "final": {
+          "javax.jdo.option.ConnectionPassword": "true",
+          "javax.jdo.option.ConnectionDriverName": "true",
+          "hive.optimize.bucketmapjoin.sortedmerge": "true"
+        }
+      }
+    },
+    "configurationTags": {
+        "capacity-scheduler": {
+            "tag": "version1"
+        }, 
+        "oozie-site": {
+            "tag": "version1"
+        }, 
+        "storm-site": {
+            "tag": "version1"
+        }, 
+        "webhcat-site": {
+            "tag": "version1"
+        }, 
+        "global": {
+            "tag": "version1"
+        }, 
+        "mapred-site": {
+            "tag": "version1"
+        }, 
+        "hdfs-site": {
+            "tag": "version1"
+        }, 
+        "hbase-site": {
+            "tag": "version1"
+        }, 
+        "core-site": {
+            "tag": "version1"
+        }, 
+        "yarn-site": {
+            "tag": "version1"
+        }, 
+        "hive-site": {
+            "tag": "version1"
+        }
+    }, 
+    "commandId": "7-1", 
+    "clusterHostInfo": {
+        "ambari_server_host": [
+            "c6401.ambari.apache.org"
+        ],
+        "snamenode_host": [
+            "c6402.ambari.apache.org"
+        ], 
+        "nm_hosts": [
+            "c6402.ambari.apache.org"
+        ], 
+        "drpc_server_hosts": [
+            "c6402.ambari.apache.org"
+        ], 
+        "slave_hosts": [
+            "c6402.ambari.apache.org"
+        ], 
+        "ganglia_server_host": [
+            "c6402.ambari.apache.org"
+        ], 
+        "hive_server_host": [
+            "c6402.ambari.apache.org"
+        ], 
+        "logviewer_server_hosts": [
+            "c6402.ambari.apache.org"
+        ],
+        "hive_metastore_host": [
+            "c6402.ambari.apache.org"
+        ], 
+        "hbase_rs_hosts": [
+            "c6402.ambari.apache.org"
+        ], 
+        "webhcat_server_host": [
+            "c6402.ambari.apache.org"
+        ], 
+        "zookeeper_hosts": [
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "supervisor_hosts": [
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "ganglia_monitor_hosts": [
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "all_ping_ports": [
+            "8670", 
+            "8670"
+        ], 
+        "rm_host": [
+            "c6402.ambari.apache.org"
+        ], 
+        "all_hosts": [
+            "c6401.ambari.apache.org", 
+            "c6402.ambari.apache.org"
+        ], 
+        "storm_ui_server_hosts": [
+            "c6401.ambari.apache.org"
+        ], 
+        "oozie_server": [
+            "c6402.ambari.apache.org"
+        ], 
+        "hs_host": [
+            "c6402.ambari.apache.org"
+        ], 
+        "nimbus_hosts": [
+            "c6401.ambari.apache.org"
+        ], 
+        "namenode_host": [
+            "c6401.ambari.apache.org"
+        ], 
+        "hbase_master_hosts": [
+            "c6402.ambari.apache.org"
+        ], 
+        "hive_mysql_host": [
+            "c6402.ambari.apache.org"
+        ],
+        "falcon_server_hosts": [
+            "c6402.ambari.apache.org"
+        ]
+    }
+}
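
The fixture above is laid out like the command JSON the Ambari agent hands to the stack scripts, so the per-service blocks shown here (hbase-env, zookeeper-env, clusterHostInfo, configurationTags, and so on) are what the Python stack code reads at execution time. Below is a minimal sketch of pulling a few of those values back out, assuming only Python's standard json module, a checkout-relative path, and that the env blocks sit under the usual "configurations" key (that key is outside the excerpt shown above):

    import json

    # Hypothetical checkout-relative path to the test fixture added by this commit.
    with open("ambari-server/src/test/python/stacks/2.3/configs/hbase_secure.json") as fh:
        cmd = json.load(fh)

    # Values visible in the fixture above; the "configurations" key is assumed.
    print(cmd["clusterHostInfo"]["hbase_master_hosts"])      # ['c6402.ambari.apache.org']
    print(cmd["configurationTags"]["hbase-site"]["tag"])     # version1
    print(cmd["configurations"]["hbase-env"]["hbase_user"])  # hbase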


[7/8] ambari git commit: AMBARI-9993. Add support for management of Phoenix Query Server to HDP Stack (Nick Dimiduk via alejandro)

Posted by al...@apache.org.
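
Each entry in the metrics.json added below maps an Ambari metric path (for example metrics/hbase/regionserver/readRequestsCount) to the underlying sink metric name (regionserver.Server.readRequestCount) plus two flags, pointInTime and temporal, which roughly mark whether the value can be requested as a current reading and as a time series. A minimal sketch of resolving one region server entry, assuming only Python's standard json module and a hypothetical local copy of the file:

    import json

    # Hypothetical local copy of the metrics.json introduced in this part.
    with open("metrics.json") as fh:
        defs = json.load(fh)

    # First component definition (type "ganglia") for the region server.
    default = defs["HBASE_REGIONSERVER"]["Component"][0]["metrics"]["default"]
    entry = default["metrics/hbase/regionserver/readRequestsCount"]
    print(entry["metric"], entry["pointInTime"], entry["temporal"])
    # -> regionserver.Server.readRequestCount False True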
http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/metrics.json b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/metrics.json
new file mode 100644
index 0000000..7bbf7be
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/metrics.json
@@ -0,0 +1,13549 @@
+{
+  "HBASE_REGIONSERVER": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/cpu/cpu_idle":{
+              "metric":"cpu_idle",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/cpu/cpu_nice":{
+              "metric":"cpu_nice",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/cpu/cpu_system":{
+              "metric":"cpu_system",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/cpu/cpu_user":{
+              "metric":"cpu_user",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/cpu/cpu_wio":{
+              "metric":"cpu_wio",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/disk_free":{
+              "metric":"disk_free",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/disk_total":{
+              "metric":"disk_total",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/load/load_fifteen":{
+              "metric":"load_fifteen",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/load/load_five":{
+              "metric":"load_five",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/load/load_one":{
+              "metric":"load_one",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/mem_buffers":{
+              "metric":"mem_buffers",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/mem_cached":{
+              "metric":"mem_cached",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/mem_free":{
+              "metric":"mem_free",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/mem_shared":{
+              "metric":"mem_shared",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/mem_total":{
+              "metric":"mem_total",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/swap_free":{
+              "metric":"swap_free",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/swap_total":{
+              "metric":"swap_total",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/network/bytes_in":{
+              "metric":"bytes_in",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/network/bytes_out":{
+              "metric":"bytes_out",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/network/pkts_in":{
+              "metric":"pkts_in",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/network/pkts_out":{
+              "metric":"pkts_out",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/process/proc_run":{
+              "metric":"proc_run",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/process/proc_total":{
+              "metric":"proc_total",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/read_count":{
+              "metric":"read_count",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/write_count":{
+              "metric":"write_count",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/read_bytes":{
+              "metric":"read_bytes",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/write_bytes":{
+              "metric":"write_bytes",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/read_time":{
+              "metric":"read_time",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/write_time":{
+              "metric":"write_time",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/hbase/regionserver/compactionTime_avg_time": {
+              "metric": "hbase.regionserver.compactionTime_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/closeRegion_num_ops": {
+              "metric": "rpc.rpc.closeRegion_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/mutationsWithoutWALSize": {
+              "metric": "regionserver.Server.mutationsWithoutWALSize",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/unassign_num_ops": {
+              "metric": "rpc.rpc.unassign_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/modifyTable_num_ops": {
+              "metric": "rpc.rpc.modifyTable_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getProtocolVersion_avg_time": {
+              "metric": "rpc.rpc.getProtocolVersion_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getZooKeeper/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.getZooKeeper.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getClosestRowBefore_num_ops": {
+              "metric": "rpc.rpc.getClosestRowBefore_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/slowAppendCount": {
+              "metric": "regionserver.Server.slowAppendCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/getClosestRowBefore/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.getClosestRowBefore.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/lockRow_num_ops": {
+              "metric": "rpc.rpc.lockRow_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/flushRegion_avg_time": {
+              "metric": "rpc.rpc.flushRegion_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/stopMaster_num_ops": {
+              "metric": "rpc.rpc.stopMaster_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/openRegions/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.openRegions.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/balance_avg_time": {
+              "metric": "rpc.rpc.balance_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/part_max_used": {
+              "metric": "part_max_used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/modifyColumn_avg_time": {
+              "metric": "rpc.rpc.modifyColumn_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/multi/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.multi.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/rootIndexSizeKB": {
+              "metric": "hbase.regionserver.rootIndexSizeKB",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getZooKeeper_num_ops": {
+              "metric": "rpc.rpc.getZooKeeper_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheCount": {
+              "metric": "regionserver.Server.blockCacheCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/flushRegion_num_ops": {
+              "metric": "rpc.rpc.flushRegion_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rollHLogWriter/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.rollHLogWriter.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.removeFromOnlineRegions.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_std_dev": {
+              "metric": "hbase.regionserver.putRequestLatency_std_dev",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/getRequestLatency_std_dev": {
+              "metric": "hbase.regionserver.getRequestLatency_std_dev",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/get_num_ops": {
+              "metric": "rpc.rpc.get_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/stopMaster_avg_time": {
+              "metric": "rpc.rpc.stopMaster_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getOnlineRegions/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.getOnlineRegions.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/removeFromOnlineRegions_num_ops": {
+              "metric": "rpc.rpc.removeFromOnlineRegions_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/ping_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.ping_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/openScanner/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.openScanner.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getRegionInfo_avg_time": {
+              "metric": "rpc.rpc.getRegionInfo_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/lockRow_avg_time": {
+              "metric": "rpc.rpc.lockRow_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/commitPending_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.commitPending_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/checkOOME_num_ops": {
+              "metric": "rpc.rpc.checkOOME_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/reportRSFatalError_num_ops": {
+              "metric": "rpc.rpc.reportRSFatalError_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getConfiguration/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.getConfiguration.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/reportRSFatalError_avg_time": {
+              "metric": "rpc.rpc.reportRSFatalError_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/memHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsRunnable": {
+              "metric": "jvm.JvmMetrics.ThreadsRunnable",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/unlockRow/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.unlockRow.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_min": {
+              "metric": "regionserver.Server.Delete_min",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsNew": {
+              "metric": "jvm.JvmMetrics.ThreadsNew",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getClusterStatus_num_ops": {
+              "metric": "rpc.rpc.getClusterStatus_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getHTableDescriptors_avg_time": {
+              "metric": "rpc.rpc.getHTableDescriptors_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationFailures": {
+              "metric": "rpc.rpc.rpcAuthorizationFailures",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/deleteColumn_num_ops": {
+              "metric": "rpc.rpc.deleteColumn_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/delete/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.delete.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_avg_time": {
+              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/increment_num_ops": {
+              "metric": "rpc.rpc.increment_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/getMapCompletionEvents_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.getMapCompletionEvents_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/stop/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.stop.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/modifyColumn_num_ops": {
+              "metric": "rpc.rpc.modifyColumn_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/checkOOME_avg_time": {
+              "metric": "rpc.rpc.checkOOME_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/next/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.next.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/RpcSlowResponse_avg_time": {
+              "metric": "rpc.rpc.RpcSlowResponse_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getConfiguration_avg_time": {
+              "metric": "rpc.rpc.getConfiguration_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getServerName/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.getServerName.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/unassign_avg_time": {
+              "metric": "rpc.rpc.unassign_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "rpc.rpc.NumOpenConnections",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/delete/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.delete.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/canCommit_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.canCommit_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/multi/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.multi.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/fsReadLatencyHistogram_75th_percentile": {
+              "metric": "hbase.regionserver.fsReadLatencyHistogram_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_num_ops": {
+              "metric": "regionserver.Server.Delete_num_ops",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/boottime": {
+              "metric": "boottime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/compactRegion_avg_time": {
+              "metric": "rpc.rpc.compactRegion_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/fsWriteLatencyHistogram_num_ops": {
+              "metric": "hbase.regionserver.fsWriteLatencyHistogram_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/writeRequestsCount": {
+              "metric": "regionserver.Server.writeRequestCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/getProtocolSignature/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.getProtocolSignature.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/execCoprocessor_num_ops": {
+              "metric": "rpc.rpc.execCoprocessor_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/canCommit_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.canCommit_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getHServerInfo/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.getHServerInfo.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/getRequestLatency_min": {
+              "metric": "regionserver.Server.Get_min",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/getCatalogTracker/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.getCatalogTracker.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/incrementColumnValue_avg_time": {
+              "metric": "rpc.rpc.incrementColumnValue_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_num_ops": {
+              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/deleteTable_num_ops": {
+              "metric": "rpc.rpc.deleteTable_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/logError": {
+              "metric": "jvm.JvmMetrics.LogError",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getBlockCacheColumnFamilySummaries_num_ops": {
+              "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_75th_percentile": {
+              "metric": "regionserver.Server.Mutate_75th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/getFromOnlineRegions/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.getFromOnlineRegions.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getRegionInfo/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.getRegionInfo.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheHitCount": {
+              "metric": "regionserver.Server.blockCacheHitCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/exists_avg_time": {
+              "metric": "rpc.rpc.exists_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/slowPutCount": {
+              "metric": "regionserver.Server.slowPutCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/fsWriteLatency_num_ops": {
+              "metric": "hbase.regionserver.fsWriteLatency_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getOnlineRegions/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.getOnlineRegions.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/exists/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.exists.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/replicateLogEntries/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.replicateLogEntries.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/delete_num_ops": {
+              "metric": "rpc.rpc.delete_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/exists_num_ops": {
+              "metric": "rpc.rpc.exists_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/regionServerStartup_avg_time": {
+              "metric": "rpc.rpc.regionServerStartup_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/checkAndDelete_num_ops": {
+              "metric": "rpc.rpc.checkAndDelete_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/closeRegion_avg_time": {
+              "metric": "rpc.rpc.closeRegion_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/getBlockLocalPathInfo_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.getBlockLocalPathInfo_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getProtocolSignature_avg_time": {
+              "metric": "rpc.rpc.getProtocolSignature_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/assign_avg_time": {
+              "metric": "rpc.rpc.assign_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/compactionSize_num_ops": {
+              "metric": "hbase.regionserver.compactionSize_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/close_avg_time": {
+              "metric": "rpc.rpc.close_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheSize": {
+              "metric": "regionserver.Server.blockCacheSize",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_num_ops": {
+              "metric": "regionserver.Server.Mutate_num_ops",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsBlocked": {
+              "metric": "jvm.JvmMetrics.ThreadsBlocked",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getHServerInfo_num_ops": {
+              "metric": "rpc.rpc.getHServerInfo_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/stop_avg_time": {
+              "metric": "rpc.rpc.stop_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/isStopped_num_ops": {
+              "metric": "rpc.rpc.isStopped_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_median": {
+              "metric": "regionserver.Server.Mutate_median",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/fsReadLatencyHistogram_num_ops": {
+              "metric": "hbase.regionserver.fsReadLatencyHistogram_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/fsWriteLatencyHistogram_median": {
+              "metric": "hbase.regionserver.fsWriteLatencyHistogram_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/isMasterRunning_avg_time": {
+              "metric": "rpc.rpc.isMasterRunning_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/incrementColumnValue_num_ops": {
+              "metric": "rpc.rpc.incrementColumnValue_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_std_dev": {
+              "metric": "hbase.regionserver.deleteRequestLatency_std_dev",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/bulkLoadHFiles/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.bulkLoadHFiles.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/hdfsBlocksLocalityIndex": {
+              "metric": "hbase.regionserver.hdfsBlocksLocalityIndex",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/readRequestsCount": {
+              "metric": "regionserver.Server.readRequestCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/execCoprocessor/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.execCoprocessor.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_min": {
+              "metric": "regionserver.Server.Mutate_min",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/storefileIndexSizeMB": {
+              "metric": "regionserver.Server.storeFileIndexSize",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/assign_num_ops": {
+              "metric": "rpc.rpc.assign_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/close/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.close.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_median": {
+              "metric": "regionserver.Server.Delete_median",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/Get_num_ops": {
+              "metric": "regionserver.Server.Get_num_ops",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/Append_num_ops": {
+              "metric": "regionserver.Server.Append_num_ops",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/Delete_num_ops": {
+              "metric": "regionserver.Server.Delete_num_ops",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/Mutate_num_ops": {
+              "metric": "regionserver.Server.Mutate_num_ops",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/Get_95th_percentile": {
+              "metric": "regionserver.Server.Get_95th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/Mutate_95th_percentile": {
+              "metric": "regionserver.Server.Mutate_95th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/Increment_95th_percentile": {
+              "metric": "regionserver.Server.Increment_95th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/Append_95th_percentile": {
+              "metric": "regionserver.Server.Append_95th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/Delete_95th_percentile": {
+              "metric": "regionserver.Server.Delete_95th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/percentFilesLocal": {
+              "metric": "regionserver.Server.percentFilesLocal",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/updatesBlockedTime": {
+              "metric": "regionserver.Server.updatesBlockedTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/IPC/numOpenConnections": {
+              "metric": "ipc.IPC.numOpenConnections",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/IPC/numActiveHandler": {
+              "metric": "ipc.IPC.numActiveHandler",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/IPC/numCallsInGeneralQueue": {
+              "metric": "ipc.IPC.numCallsInGeneralQueue",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/enableTable_avg_time": {
+              "metric": "rpc.rpc.enableTable_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_mean": {
+              "metric": "regionserver.Server.Mutate_mean",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/incrementColumnValue/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.incrementColumnValue.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/close_num_ops": {
+              "metric": "rpc.rpc.close_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getConfiguration/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.getConfiguration.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/done_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.done_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/compactionSize_avg_time": {
+              "metric": "hbase.regionserver.compactionSize_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getFromOnlineRegions_avg_time": {
+              "metric": "rpc.rpc.getFromOnlineRegions_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/fsReadLatencyHistogram_min": {
+              "metric": "hbase.regionserver.fsReadLatencyHistogram_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/increment/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.increment.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/deleteTable_avg_time": {
+              "metric": "rpc.rpc.deleteTable_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/put/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.put.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/delete_avg_time": {
+              "metric": "rpc.rpc.delete_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/statusUpdate_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.statusUpdate_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/openRegions/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.openRegions.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/compactRegion/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.compactRegion.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_avg_time": {
+              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationFailures": {
+              "metric": "rpc.rpc.rpcAuthenticationFailures",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/openScanner/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.openScanner.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getClusterStatus_avg_time": {
+              "metric": "rpc.rpc.getClusterStatus_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/unlockRow/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.unlockRow.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/removeFromOnlineRegions_avg_time": {
+              "metric": "rpc.rpc.removeFromOnlineRegions_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/put/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.put.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/modifyTable_avg_time": {
+              "metric": "rpc.rpc.modifyTable_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/checkAndPut_avg_time": {
+              "metric": "rpc.rpc.checkAndPut_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/isStopped/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.isStopped.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/put_avg_time": {
+              "metric": "rpc.rpc.put_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheHitRatio": {
+              "metric": "hbase.regionserver.blockCacheHitRatio",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheHitPercent": {
+              "metric": "regionserver.Server.blockCountHitPercent",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/createTable_avg_time": {
+              "metric": "rpc.rpc.createTable_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getRegionInfo/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.getRegionInfo.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/fsReadLatencyHistogram_std_dev": {
+              "metric": "hbase.regionserver.fsReadLatencyHistogram_std_dev",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getHTableDescriptors_num_ops": {
+              "metric": "rpc.rpc.getHTableDescriptors_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getAlterStatus_avg_time": {
+              "metric": "rpc.rpc.getAlterStatus_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getRegionInfo_num_ops": {
+              "metric": "rpc.rpc.getRegionInfo_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/statusUpdate_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.statusUpdate_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/compactRegion_num_ops": {
+              "metric": "rpc.rpc.compactRegion_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/isAborted_num_ops": {
+              "metric": "rpc.rpc.isAborted_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getFromOnlineRegions/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.getFromOnlineRegions.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/fsReadLatencyHistogram_max": {
+              "metric": "hbase.regionserver.fsReadLatencyHistogram_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheEvictedCount": {
+              "metric": "regionserver.Server.blockCacheEvictionCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/checkOOME/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.checkOOME.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/disableTable_num_ops": {
+              "metric": "rpc.rpc.disableTable_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/openScanner_num_ops": {
+              "metric": "rpc.rpc.openScanner_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/bulkLoadHFiles/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.bulkLoadHFiles.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/regionServerReport_num_ops": {
+              "metric": "rpc.rpc.regionServerReport_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/openRegions_avg_time": {
+              "metric": "rpc.rpc.openRegions_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/exists/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.exists.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_99th_percentile": {
+              "metric": "regionserver.Server.Mutate_99th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/execCoprocessor/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.execCoprocessor.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/isMasterRunning_num_ops": {
+              "metric": "rpc.rpc.isMasterRunning_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/balanceSwitch_num_ops": {
+              "metric": "rpc.rpc.balanceSwitch_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/offline_num_ops": {
+              "metric": "rpc.rpc.offline_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/getRequestLatency_max": {
+              "metric": "regionserver.Server.Get_max",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/abort_num_ops": {
+              "metric": "rpc.rpc.abort_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/fsReadLatencyHistogram_95th_percentile": {
+              "metric": "hbase.regionserver.fsReadLatencyHistogram_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheHitCachingRatio": {
+              "metric": "hbase.regionserver.blockCacheHitCachingRatio",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_avg_time": {
+              "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/getProtocolVersion_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rollHLogWriter_num_ops": {
+              "metric": "rpc.rpc.rollHLogWriter_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/openRegions_num_ops": {
+              "metric": "rpc.rpc.openRegions_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/logFatal": {
+              "metric": "jvm.JvmMetrics.LogFatal",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/splitRegion_avg_time": {
+              "metric": "rpc.rpc.splitRegion_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/closeRegion/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.closeRegion.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/checkAndPut/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.checkAndPut.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/getRequestLatency_99th_percentile": {
+              "metric": "regionserver.Server.Get_99th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/fsWriteLatencyHistogram_min": {
+              "metric": "hbase.regionserver.fsWriteLatencyHistogram_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/splitRegion/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.splitRegion.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/getProtocolVersion_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/fsWriteLatencyHistogram_std_dev": {
+              "metric": "hbase.regionserver.fsWriteLatencyHistogram_std_dev",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_avg_time": {
+              "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_99th_percentile": {
+              "metric": "regionserver.Server.Delete_99th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/getHServerInfo/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.getHServerInfo.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/fsWriteLatencyHistogram_max": {
+              "metric": "hbase.regionserver.fsWriteLatencyHistogram_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/getTask_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.getTask_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/replicateLogEntries_num_ops": {
+              "metric": "rpc.rpc.replicateLogEntries_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/multi_avg_time": {
+              "metric": "rpc.rpc.multi_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/slowIncrementCount": {
+              "metric": "regionserver.Server.slowIncrementCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_95th_percentile": {
+              "metric": "regionserver.Server.Mutate_95th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/compactionQueueSize": {
+              "metric": "regionserver.Server.compactionQueueLength",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/getCatalogTracker_avg_time": {
+              "metric": "rpc.rpc.getCatalogTracker_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/splitRegion_num_ops": {
+              "metric": "rpc.rpc.splitRegion_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/balance_num_ops": {
+              "metric": "rpc.rpc.balance_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/flushTime_num_ops": {
+              "metric": "hbase.regionserver.flushTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/shutdown_num_ops": {
+              "metric": "rpc.rpc.shutdown_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/fsReadLatency_num_ops": {
+              "metric": "hbase.regionserver.fsReadLatency_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/isAborted/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.isAborted.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/getRequestLatency_75th_percentile": {
+              "metric": "regionserver.Server.Get_75th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/getProtocolSignature/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.getProtocolSignature.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getServerName_avg_time": {
+              "metric": "rpc.rpc.getServerName_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/compactionTime_num_ops": {
+              "metric": "hbase.regionserver.compactionTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/abort_avg_time": {
+              "metric": "rpc.rpc.abort_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getCatalogTracker/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.getCatalogTracker.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/getBlockLocalPathInfo_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.getBlockLocalPathInfo_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/enableTable_num_ops": {
+              "metric": "rpc.rpc.enableTable_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/lockRow/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.lockRow.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/stores": {
+              "metric": "regionserver.Server.storeCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/addColumn_avg_time": {
+              "metric": "rpc.rpc.addColumn_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/addToOnlineRegions/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.addToOnlineRegions.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getZooKeeper/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.getZooKeeper.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getServerName_num_ops": {
+              "metric": "rpc.rpc.getServerName_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getServerName/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.getServerName.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationSuccesses": {
+              "metric": "rpc.rpc.rpcAuthenticationSuccesses",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/isStopped/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.isStopped.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/disableTable_avg_time": {
+              "metric": "rpc.rpc.disableTable_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rollHLogWriter/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.rollHLogWriter.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/abort/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.abort.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/openRegion_avg_time": {
+              "metric": "rpc.rpc.openRegion_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/regionServerReport_avg_time": {
+              "metric": "rpc.rpc.regionServerReport_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getAlterStatus_num_ops": {
+              "metric": "rpc.rpc.getAlterStatus_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/flushRegion/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.flushRegion.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/incrementColumnValue/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.incrementColumnValue.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/next_avg_time": {
+              "metric": "rpc.rpc.next_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/getRequestLatency_num_ops": {
+              "metric": "regionserver.Server.Get_num_ops",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "rpc.rpc.ReceivedBytes",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/bulkLoadHFiles_num_ops": {
+              "metric": "rpc.rpc.bulkLoadHFiles_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/ping_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.ping_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/fsReadLatency_avg_time": {
+              "metric": "hbase.regionserver.fsReadLatency_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/flushSize_num_ops": {
+              "metric": "hbase.regionserver.flushSize_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/gcTimeMillis": {
+              "metric": "jvm.JvmMetrics.GcTimeMillis",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTerminated": {
+              "metric": "jvm.JvmMetrics.ThreadsTerminated",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getProtocolVersion/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.getProtocolVersion.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/checkAndDelete/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.checkAndDelete.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/balanceSwitch_avg_time": {
+              "metric": "rpc.rpc.balanceSwitch_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_max": {
+              "metric": "regionserver.Server.Mutate_max",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/openRegion/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.openRegion.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/lockRow/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.lockRow.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/callQueueLen": {
+              "metric": "rpc.rpc.callQueueLen",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/openRegion_num_ops": {
+              "metric": "rpc.rpc.openRegion_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/compactRegion/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.compactRegion.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/fsSyncLatency_num_ops": {
+              "metric": "hbase.regionserver.fsSyncLatency_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/fsWriteLatencyHistogram_95th_percentile": {
+              "metric": "hbase.regionserver.fsWriteLatencyHistogram_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getOnlineRegions_avg_time": {
+              "metric": "rpc.rpc.getOnlineRegions_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/fsWriteLatencyHistogram_75th_percentile": {
+              "metric": "hbase.regionserver.fsWriteLatencyHistogram_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/move_num_ops": {
+              "metric": "rpc.rpc.move_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/stop_num_ops": {
+              "metric": "rpc.rpc.stop_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/replicateLogEntries_avg_time": {
+              "metric": "rpc.rpc.replicateLogEntries_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/getRequestLatency_mean": {
+              "metric": "regionserver.Server.Get_mean",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/get_avg_time": {
+              "metric": "rpc.rpc.get_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/multi_num_ops": {
+              "metric": "rpc.rpc.multi_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/next/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.next.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/addToOnlineRegions_avg_time": {
+              "metric": "rpc.rpc.addToOnlineRegions_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/deleteColumn_avg_time": {
+              "metric": "rpc.rpc.deleteColumn_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/regions": {
+              "metric": "regionserver.Server.regionCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/bulkLoadHFiles_avg_time": {
+              "metric": "rpc.rpc.bulkLoadHFiles_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/isAborted/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.isAborted.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/stop/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.stop.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/addToOnlineRegions_num_ops": {
+              "metric": "rpc.rpc.addToOnlineRegions_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/abort/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.abort.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.removeFromOnlineRegions.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheFree": {
+              "metric": "regionserver.Server.blockCacheFreeSize",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/offline_avg_time": {
+              "metric": "rpc.rpc.offline_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/unlockRow_avg_time": {
+              "metric": "rpc.rpc.unlockRow_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheMissCount": {
+              "metric": "regionserver.Server.blockCacheMissCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/getCatalogTracker_num_ops": {
+              "metric": "rpc.rpc.getCatalogTracker_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/checkOOME/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.checkOOME.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/flushQueueSize": {
+              "metric": "regionserver.Server.flushQueueLength",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/checkAndPut/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.checkAndPut.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/close/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.close.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/execCoprocessor_avg_time": {
+              "metric": "rpc.rpc.execCoprocessor_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/fsWriteLatencyHistogram_mean": {
+              "metric": "hbase.regionserver.fsWriteLatencyHistogram_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/createTable_num_ops": {
+              "metric": "rpc.rpc.createTable_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getConfiguration_num_ops": {
+              "metric": "rpc.rpc.getConfiguration_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/isStopped_avg_time": {
+              "metric": "rpc.rpc.isStopped_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_num_ops": {
+              "metric": "rpc.rpc.RpcQueueTimeNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rollHLogWriter_avg_time": {
+              "metric": "rpc.rpc.rollHLogWriter_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/fsSyncLatency_avg_time": {
+              "metric": "hbase.regionserver.fsSyncLatency_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_mean": {
+              "metric": "regionserver.Server.Delete_mean",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/addToOnlineRegions/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.addToOnlineRegions.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/getMapCompletionEvents_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.getMapCompletionEvents_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/cpu/cpu_aidle": {
+              "metric": "cpu_aidle",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/fsReadLatencyHistogram_mean": {
+              "metric": "hbase.regionserver.fsReadLatencyHistogram_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/totalStaticIndexSizeKB": {
+              "metric": "regionserver.Server.staticIndexSize",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/checkAndDelete/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.checkAndDelete.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getFromOnlineRegions_num_ops": {
+              "metric": "rpc.rpc.getFromOnlineRegions_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/mutationsWithoutWALCount": {
+              "metric": "regionserver.Server.mutationsWithoutWALCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/get/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.get.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/getRequestLatency_median": {
+              "metric": "regionserver.Server.Get_median",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/openScanner_avg_time": {
+              "metric": "rpc.rpc.openScanner_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/RpcSlowResponse_num_ops": {
+              "metric": "rpc.rpc.RpcSlowResponse_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/splitRegion/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.splitRegion.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/isAborted_avg_time": {
+              "metric": "rpc.rpc.isAborted_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getClosestRowBefore/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.getClosestRowBefore.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/flushRegion/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.flushRegion.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/flushSize_avg_time": {
+              "metric": "hbase.regionserver.flushSize_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/commitPending_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.commitPending_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getClosestRowBefore_avg_time": {
+              "metric": "rpc.rpc.getClosestRowBefore_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/cpu/cpu_speed": {
+              "metric": "cpu_speed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_max": {
+              "metric": "regionserver.Server.Delete_max",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/get/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.get.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/put_num_ops": {
+              "metric": "rpc.rpc.put_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/move_avg_time": {
+              "metric": "rpc.rpc.move_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/percentFilesLocal": {
+              "metric": "regionserver.Server.percentFilesLocal",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/fsWriteLatency_avg_time": {
+              "metric": "hbase.regionserver.fsWriteLatency_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/increment/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.increment.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/openRegion/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.openRegion.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "rpc.rpc.SentBytes",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/getTask_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.getTask_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/addColumn_num_ops": {
+              "metric": "rpc.rpc.addColumn_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/logWarn": {
+              "metric": "jvm.JvmMetrics.LogWarn",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/maxMemoryM": {
+              "metric": "jvm.metrics.maxMemoryM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTimedWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/gcCount": {
+              "metric": "jvm.JvmMetrics.GcCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getOnlineRegions_num_ops": {
+              "metric": "rpc.rpc.getOnlineRegions_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/flushTime_avg_time": {
+              "metric": "hbase.regionserver.flushTime_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/done_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.done_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getProtocolVersion_num_ops": {
+              "metric": "rpc.rpc.getProtocolVersion_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/memHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemHeapUsedM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/unlockRow_num_ops": {
+              "metric": "rpc.rpc.unlockRow_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsWaiting",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/slowGetCount": {
+              "metric": "regionserver.Server.slowGetCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/replicateLogEntries/aboveOneSec/_avg_time": {
+              "metric": "rpc.rpc.replicateLogEntries.aboveOneSec._avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/shutdown_avg_time": {
+              "metric": "rpc.rpc.shutdown_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/regionServerStartup_num_ops": {
+              "metric": "rpc.rpc.regionServerStartup_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/requests": {
+              "metric": "regionserver.Server.totalRequestCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/fsReadLatencyHistogram_99th_percentile": {
+              "metric": "hbase.regionserver.fsReadLatencyHistogram_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/fsWriteLatencyHistogram_99th_percentile": {
+              "metric": "hbase.regionserver.fsWriteLatencyHistogram_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/storefiles": {
+              "metric": "regionserver.Server.storeFileCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/next_num_ops": {
+              "metric": "rpc.rpc.next_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getBlockCacheColumnFamilySummaries_avg_time": {
+              "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/slowDeleteCount": {
+              "metric": "regionserver.Server.slowDeleteCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/checkAndDelete_avg_time": {
+              "metric": "rpc.rpc.checkAndDelete_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/closeRegion/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.closeRegion.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getHServerInfo_avg_time": {
+              "metric": "rpc.rpc.getHServerInfo_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/logInfo": {
+              "metric": "jvm.JvmMetrics.LogInfo",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getZooKeeper_avg_time": {
+              "metric": "rpc.rpc.getZooKeeper_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/hlogFileCount": {
+              "metric": "hbase.regionserver.hlogFileCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/getRequestLatency_95th_percentile": {
+              "metric": "regionserver.Server.Get_95th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_95th_percentile": {
+              "metric": "regionserver.Server.Delete_95th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/memstoreSize": {
+              "metric": "regionserver.Server.memStoreSize",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/fsReadLatencyHistogram_median": {
+              "metric": "hbase.regionserver.fsReadLatencyHistogram_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getProtocolSignature_num_ops": {
+              "metric": "rpc.rpc.getProtocolSignature_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/getProtocolVersion/aboveOneSec/_num_ops": {
+              "metric": "rpc.rpc.getProtocolVersion.aboveOneSec._num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_75th_percentile": {
+              "metric": "regionserver.Server.Delete_75th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationSuccesses": {
+              "metric": "rpc.rpc.rpcAuthorizationSuccesses",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/totalStaticBloomSizeKB": {
+              "metric": "regionserver.Server.staticBloomSize",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/checkAndPut_num_ops": {
+              "metric": "rpc.rpc.checkAndPut_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/increment_avg_time": {
+              "metric": "rpc.rpc.increment_avg_time",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "default": {
+            "metrics/hbase/regionserver/slowPutCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowPutCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/percentFilesLocal": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.percentFilesLocal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_min": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_min",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheFree": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheFreeSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/mutationsWithoutWALSize": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.mutationsWithoutWALSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheMissCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheMissCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/flushQueueSize": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.flushQueueLength",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_99th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_99th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/slowAppendCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowAppendCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheSize": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/slowIncrementCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowIncrementCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheEvictedCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheEvictionCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_95th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/compactionQueueSize": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.compactionQueueLength",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_median": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_median",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_mean": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_mean",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/slowGetCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowGetCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_75th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_75th_percentile",
+              "pointInTime": true

<TRUNCATED>
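
The (truncated) metrics mapping above ties each Ambari metric path (e.g. metrics/hbase/regionserver/getRequestLatency_max) to a source-specific name: the entries with dotted sink names such as regionserver.Server.Get_max are mostly temporal, while the "jmx" block that follows resolves the same paths against Hadoop:service=HBase,name=RegionServer,sub=Server MBean attributes and marks them pointInTime only. A minimal sanity-check sketch over a mapping shaped like those blocks (the file name and helper below are illustrative, not part of the commit):

    import json

    REQUIRED_KEYS = {"metric", "pointInTime", "temporal"}

    def check_metric_mappings(path):
        # Expects JSON shaped like the blocks above:
        # { "<ambari metric path>": {"metric": ..., "pointInTime": bool, "temporal": bool}, ... }
        with open(path) as fh:
            mappings = json.load(fh)
        problems = []
        for ambari_path, spec in mappings.items():
            missing = REQUIRED_KEYS - set(spec)
            if missing:
                problems.append((ambari_path, sorted(missing)))
        return problems

    if __name__ == "__main__":
        # Hypothetical file name; the real definitions ship inside the stack's metrics JSON.
        for metric_path, missing in check_metric_mappings("regionserver_metrics.json"):
            print("%s is missing: %s" % (metric_path, ", ".join(missing)))

Each entry must carry all three keys for the metrics provider to know both where to fetch the value and whether it can be charted over time.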

[4/8] ambari git commit: AMBARI-9993. Add support for management of Phoenix Query Server to HDP Stack (Nick Dimiduk via alejandro)

Posted by al...@apache.org.
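
The diff below adds RMFTestCase-based unit tests for the RegionServer script: executeScript() drives hbase_regionserver.py with a canned config (hbase_default.json or the new hbase_secure.json above), and assertResourceCalled()/assertNoMoreResources() pin down exactly which resources the script declares, in order. A stripped-down sketch of that pattern, mirroring the tests in the diff (illustrative only, not an additional test shipped with the commit):

    from stacks.utils.RMFTestCase import *

    class TestSketch(RMFTestCase):
        COMMON_SERVICES_PACKAGE_DIR = "HBASE/1.1.0.2.3/package"
        STACK_VERSION = "2.3"

        def test_configure_sketch(self):
            self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_regionserver.py",
                               classname = "HbaseRegionServer",
                               command = "configure",
                               config_file = "hbase_secure.json",
                               hdp_stack_version = self.STACK_VERSION,
                               target = RMFTestCase.TARGET_COMMON_SERVICES)
            # Every resource the script declares must be asserted, ending with:
            self.assertResourceCalled('Directory', '/etc/hbase', mode = 0755)
            self.assertNoMoreResources()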
http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/test/python/stacks/2.3/HBASE/test_hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/HBASE/test_hbase_regionserver.py b/ambari-server/src/test/python/stacks/2.3/HBASE/test_hbase_regionserver.py
new file mode 100644
index 0000000..153555e
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.3/HBASE/test_hbase_regionserver.py
@@ -0,0 +1,601 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, patch
+from stacks.utils.RMFTestCase import *
+from unittest import skip
+
+@patch("platform.linux_distribution", new = MagicMock(return_value="Linux"))
+@patch("os.path.exists", new = MagicMock(return_value=True))
+class TestHbaseRegionServer(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "HBASE/1.1.0.2.3/package"
+  STACK_VERSION = "2.3"
+
+  def test_configure_default(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_regionserver.py",
+                   classname = "HbaseRegionServer",
+                   command = "configure",
+                   config_file="hbase_default.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+    
+    self.assert_configure_default()
+    self.assertNoMoreResources()
+    
+  def test_start_default(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_regionserver.py",
+                   classname = "HbaseRegionServer",
+                   command = "start",
+                   config_file="hbase_default.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+    
+    self.assert_configure_default()
+    self.assertResourceCalled('Execute', '/usr/hdp/current/hbase-regionserver/bin/hbase-daemon.sh --config /etc/hbase/conf start regionserver',
+      not_if = 'ls /var/run/hbase/hbase-hbase-regionserver.pid >/dev/null 2>&1 && ps -p `cat /var/run/hbase/hbase-hbase-regionserver.pid` >/dev/null 2>&1',
+      user = 'hbase'
+    )
+    self.assertNoMoreResources()
+    
+  def test_stop_default(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_regionserver.py",
+                   classname = "HbaseRegionServer",
+                   command = "stop",
+                   config_file="hbase_default.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+    
+    self.assertResourceCalled('Execute', '/usr/hdp/current/hbase-regionserver/bin/hbase-daemon.sh --config /etc/hbase/conf stop regionserver',
+        on_timeout = '! ( ls /var/run/hbase/hbase-hbase-regionserver.pid >/dev/null 2>&1 && ps -p `cat /var/run/hbase/hbase-hbase-regionserver.pid` >/dev/null 2>&1 ) || ambari-sudo.sh -H -E kill -9 `cat /var/run/hbase/hbase-hbase-regionserver.pid`',
+        timeout = 30,
+        user = 'hbase',
+    )
+    
+    self.assertResourceCalled('Execute', 'rm -f /var/run/hbase/hbase-hbase-regionserver.pid',
+    )
+    self.assertNoMoreResources()
+    
+  def test_configure_secured(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_regionserver.py",
+                   classname = "HbaseRegionServer",
+                   command = "configure",
+                   config_file="hbase_secure.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+    
+    self.assert_configure_secured()
+    self.assertNoMoreResources()
+    
+  def test_start_secured(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_regionserver.py",
+                   classname = "HbaseRegionServer",
+                   command = "start",
+                   config_file="hbase_secure.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+    
+    self.assert_configure_secured()
+    self.assertResourceCalled('Execute', '/usr/hdp/current/hbase-regionserver/bin/hbase-daemon.sh --config /etc/hbase/conf start regionserver',
+      not_if = 'ls /var/run/hbase/hbase-hbase-regionserver.pid >/dev/null 2>&1 && ps -p `cat /var/run/hbase/hbase-hbase-regionserver.pid` >/dev/null 2>&1',
+      user = 'hbase',
+    )
+    self.assertNoMoreResources()
+    
+  def test_stop_secured(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_regionserver.py",
+                   classname = "HbaseRegionServer",
+                   command = "stop",
+                   config_file="hbase_secure.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+
+    self.assertResourceCalled('Execute', '/usr/hdp/current/hbase-regionserver/bin/hbase-daemon.sh --config /etc/hbase/conf stop regionserver',
+        on_timeout = '! ( ls /var/run/hbase/hbase-hbase-regionserver.pid >/dev/null 2>&1 && ps -p `cat /var/run/hbase/hbase-hbase-regionserver.pid` >/dev/null 2>&1 ) || ambari-sudo.sh -H -E kill -9 `cat /var/run/hbase/hbase-hbase-regionserver.pid`',
+        timeout = 30,
+        user = 'hbase',
+    )
+    
+    self.assertResourceCalled('Execute', 'rm -f /var/run/hbase/hbase-hbase-regionserver.pid',
+    )
+    self.assertNoMoreResources()
+
+  def assert_configure_default(self):
+    self.assertResourceCalled('Directory', '/etc/hbase',
+      mode = 0755
+    )
+    self.assertResourceCalled('Directory', '/etc/hbase/conf',
+      owner = 'hbase',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/hadoop/hbase',
+      owner = 'hbase',
+      mode=0775,
+      recursive = True,
+      cd_access='a'
+    )
+    self.assertResourceCalled('Directory', '/hadoop/hbase/local',
+      owner = 'hbase',
+      group = 'hadoop',
+      mode=0775,
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/hadoop/hbase/local/jars',
+      owner = 'hbase',
+      group = 'hadoop',
+      mode=0775,
+      recursive = True,
+    )
+    self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hbase-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hbase-site']
+    )
+    self.assertResourceCalled('XmlConfig', 'core-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['core-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['core-site']
+    )
+    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hdfs-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
+    )
+    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
+      owner = 'hdfs',
+      group = 'hadoop',
+      conf_dir = '/etc/hadoop/conf',
+      configurations = self.getConfig()['configurations']['hdfs-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
+    )
+    self.assertResourceCalled('XmlConfig', 'hbase-policy.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hbase-policy'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hbase-policy']
+    )
+    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-env.sh',
+      owner = 'hbase',
+      content = InlineTemplate(self.getConfig()['configurations']['hbase-env']['content']),
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics2-hbase.properties',
+      owner = 'hbase',
+      template_tag = 'GANGLIA-RS',
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('Directory', '/var/run/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/var/log/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )
+    self.assertResourceCalled('File',
+                              '/etc/hbase/conf/log4j.properties',
+                              mode=0644,
+                              group='hadoop',
+                              owner='hbase',
+                              content='log4jproperties\nline2'
+    )
+    self.assertResourceCalled('HdfsDirectory', 'hdfs://c6405.ambari.apache.org:8020/apps/hbase/data',
+                              security_enabled = False,
+                              keytab = UnknownConfigurationMock(),
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              owner = 'hbase',
+                              bin_dir = '/usr/hdp/current/hadoop-client/bin',
+                              action = ['create_delayed'],
+                              )
+    self.assertResourceCalled('HdfsDirectory', '/apps/hbase/staging',
+                              security_enabled = False,
+                              keytab = UnknownConfigurationMock(),
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              mode = 0711,
+                              owner = 'hbase',
+                              bin_dir = '/usr/hdp/current/hadoop-client/bin',
+                              action = ['create_delayed'],
+                              )
+    self.assertResourceCalled('HdfsDirectory', None,
+                              security_enabled = False,
+                              keytab = UnknownConfigurationMock(),
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              bin_dir = '/usr/hdp/current/hadoop-client/bin',
+                              action = ['create'],
+                              )
+
+  def assert_configure_secured(self):
+    self.assertResourceCalled('Directory', '/etc/hbase',
+      mode = 0755
+    )
+    self.assertResourceCalled('Directory', '/etc/hbase/conf',
+      owner = 'hbase',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/hadoop/hbase',
+      owner = 'hbase',
+      mode=0775,
+      recursive = True,
+      cd_access='a'
+    )
+    self.assertResourceCalled('Directory', '/hadoop/hbase/local',
+      owner = 'hbase',
+      group = 'hadoop',
+      mode=0775,
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/hadoop/hbase/local/jars',
+      owner = 'hbase',
+      group = 'hadoop',
+      mode=0775,
+      recursive = True,
+    )
+    self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hbase-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hbase-site']
+    )
+    self.assertResourceCalled('XmlConfig', 'core-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['core-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['core-site']
+    )
+    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hdfs-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
+    )
+    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
+      owner = 'hdfs',
+      group = 'hadoop',
+      conf_dir = '/etc/hadoop/conf',
+      configurations = self.getConfig()['configurations']['hdfs-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
+    )
+    self.assertResourceCalled('XmlConfig', 'hbase-policy.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hbase-policy'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hbase-policy']
+    )
+    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-env.sh',
+      owner = 'hbase',
+      content = InlineTemplate(self.getConfig()['configurations']['hbase-env']['content']),
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics2-hbase.properties',
+      owner = 'hbase',
+      template_tag = 'GANGLIA-RS',
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase_regionserver_jaas.conf',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('Directory', '/var/run/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/var/log/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )
+    self.assertResourceCalled('File',
+                              '/etc/hbase/conf/log4j.properties',
+                              mode=0644,
+                              group='hadoop',
+                              owner='hbase',
+                              content='log4jproperties\nline2'
+    )
+    self.assertResourceCalled('HdfsDirectory', 'hdfs://c6405.ambari.apache.org:8020/apps/hbase/data',
+                              security_enabled = True,
+                              keytab = '/etc/security/keytabs/hdfs.headless.keytab',
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              owner = 'hbase',
+                              bin_dir = '/usr/hdp/current/hadoop-client/bin',
+                              action = ['create_delayed'],
+                              )
+    self.assertResourceCalled('HdfsDirectory', '/apps/hbase/staging',
+                              security_enabled = True,
+                              keytab = '/etc/security/keytabs/hdfs.headless.keytab',
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              mode = 0711,
+                              owner = 'hbase',
+                              bin_dir = '/usr/hdp/current/hadoop-client/bin',
+                              action = ['create_delayed'],
+                              )
+    self.assertResourceCalled('HdfsDirectory', None,
+                              security_enabled = True,
+                              keytab = '/etc/security/keytabs/hdfs.headless.keytab',
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              bin_dir = '/usr/hdp/current/hadoop-client/bin',
+                              action = ['create'],
+                              )
+
+  @skip("there's nothing to upgrade to yet")    
+  def test_start_default_24(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_regionserver.py",
+                   classname = "HbaseRegionServer",
+                   command = "start",
+                   config_file="hbase-rs-2.4.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES)
+    
+    self.assertResourceCalled('Directory', '/etc/hbase',
+      mode = 0755)
+
+    self.assertResourceCalled('Directory', '/etc/hbase/conf',
+      owner = 'hbase',
+      group = 'hadoop',
+      recursive = True)
+
+    self.assertResourceCalled('Directory', '/hadoop/hbase',
+      owner = 'hbase',
+      mode = 0775,
+      recursive = True,
+      cd_access='a')
+
+    self.assertResourceCalled('Directory', '/hadoop/hbase/local',
+      owner = 'hbase',
+      group = 'hadoop',
+      mode=0775,
+      recursive = True)
+
+    self.assertResourceCalled('Directory', '/hadoop/hbase/local/jars',
+      owner = 'hbase',
+      group = 'hadoop',
+      mode=0775,
+      recursive = True)
+
+    self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hbase-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hbase-site'])
+    self.assertResourceCalled('XmlConfig', 'core-site.xml',
+                              owner = 'hbase',
+                              group = 'hadoop',
+                              conf_dir = '/etc/hbase/conf',
+                              configurations = self.getConfig()['configurations']['core-site'],
+                              configuration_attributes = self.getConfig()['configuration_attributes']['core-site']
+    )
+    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hdfs-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site'])
+
+    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              conf_dir = '/etc/hadoop/conf',
+                              configurations = self.getConfig()['configurations']['hdfs-site'],
+                              configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site'])
+
+    self.assertResourceCalled('XmlConfig', 'hbase-policy.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hbase-policy'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hbase-policy'])
+
+    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-env.sh',
+      owner = 'hbase',
+      content = InlineTemplate(self.getConfig()['configurations']['hbase-env']['content']))
+
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics2-hbase.properties',
+      owner = 'hbase',
+      template_tag = 'GANGLIA-RS')
+
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
+      owner = 'hbase',
+      template_tag = None)
+
+    self.assertResourceCalled('Directory', '/var/run/hbase',
+      owner = 'hbase',
+      recursive = True)
+
+    self.assertResourceCalled('Directory', '/var/log/hbase',
+      owner = 'hbase',
+      recursive = True)
+
+    self.assertResourceCalled('File',
+                              '/etc/hbase/conf/log4j.properties',
+                              mode=0644,
+                              group='hadoop',
+                              owner='hbase',
+                              content='log4jproperties\nline2')
+
+    self.assertResourceCalled('HdfsDirectory', 'hdfs://nn1/apps/hbase/data',
+                              security_enabled = False,
+                              keytab = UnknownConfigurationMock(),
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = "/usr/bin/kinit",
+                              owner = 'hbase',
+                              bin_dir = '/usr/hdp/current/hadoop-client/bin',
+                              action = ['create_delayed'])
+
+    self.assertResourceCalled('HdfsDirectory', '/apps/hbase/staging',
+                              security_enabled = False,
+                              keytab = UnknownConfigurationMock(),
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = "/usr/bin/kinit",
+                              mode = 0711,
+                              owner = 'hbase',
+                              bin_dir = '/usr/hdp/current/hadoop-client/bin',
+                              action = ['create_delayed'])
+
+    self.assertResourceCalled('HdfsDirectory', None,
+                              security_enabled = False,
+                              keytab = UnknownConfigurationMock(),
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = "/usr/bin/kinit",
+                              bin_dir = '/usr/hdp/current/hadoop-client/bin',
+                              action = ['create'])
+
+    self.assertResourceCalled('Execute', '/usr/hdp/current/hbase-regionserver/bin/hbase-daemon.sh --config /etc/hbase/conf start regionserver',
+      not_if = 'ls /var/run/hbase/hbase-hbase-regionserver.pid >/dev/null 2>&1 && ps -p `cat /var/run/hbase/hbase-hbase-regionserver.pid` >/dev/null 2>&1',
+      user = 'hbase')
+
+    self.assertNoMoreResources()
+
+  @patch("resource_management.libraries.functions.security_commons.build_expectations")
+  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
+  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
+  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
+  @patch("resource_management.libraries.script.Script.put_structured_out")
+  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
+    # Test that function works when is called with correct parameters
+
+    security_params = {
+      'hbase-site': {
+        'hbase.regionserver.keytab.file': '/path/to/hbase_keytab',
+        'hbase.regionserver.kerberos.principal': 'hbase_principal'
+      }
+    }
+
+    result_issues = []
+    props_value_check = {"hbase.security.authentication": "kerberos",
+                           "hbase.security.authorization": "true"}
+    props_empty_check = ["hbase.regionserver.keytab.file",
+                           "hbase.regionserver.kerberos.principal"]
+
+    props_read_check = ["hbase.regionserver.keytab.file"]
+
+    get_params_mock.return_value = security_params
+    validate_security_config_mock.return_value = result_issues
+
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_regionserver.py",
+                   classname = "HbaseRegionServer",
+                   command = "security_status",
+                   config_file="hbase_secure.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+
+    build_exp_mock.assert_called_with('hbase-site', props_value_check, props_empty_check, props_read_check)
+    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
+    cached_kinit_executor_mock.called_with('/usr/bin/kinit',
+                                           self.config_dict['configurations']['hbase-env']['hbase_user'],
+                                           security_params['hbase-site']['hbase.regionserver.keytab.file'],
+                                           security_params['hbase-site']['hbase.regionserver.kerberos.principal'],
+                                           self.config_dict['hostname'],
+                                           '/tmp')
+
+    # Testing that the exception thrown by cached_executor is caught
+    cached_kinit_executor_mock.reset_mock()
+    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
+
+    try:
+      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_regionserver.py",
+                   classname = "HbaseRegionServer",
+                   command = "security_status",
+                   config_file="hbase_secure.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+      )
+    except:
+      self.assertTrue(True)
+
+    # Testing with a security_params which doesn't contain hbase-site
+    empty_security_params = {}
+    cached_kinit_executor_mock.reset_mock()
+    get_params_mock.reset_mock()
+    put_structured_out_mock.reset_mock()
+    get_params_mock.return_value = empty_security_params
+
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_regionserver.py",
+                   classname = "HbaseRegionServer",
+                   command = "security_status",
+                   config_file="hbase_secure.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
+
+    # Testing with not empty result_issues
+    result_issues_with_params = {
+      'hbase-site' : "Something bad happened"
+    }
+
+    validate_security_config_mock.reset_mock()
+    get_params_mock.reset_mock()
+    validate_security_config_mock.return_value = result_issues_with_params
+    get_params_mock.return_value = security_params
+
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_regionserver.py",
+                   classname = "HbaseRegionServer",
+                   command = "security_status",
+                   config_file="hbase_secure.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
+
+    # Testing with security_enabled = false
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_regionserver.py",
+                   classname = "HbaseRegionServer",
+                   command = "security_status",
+                   config_file="hbase_default.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
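
The four executions above pin down three distinct structured outputs for the
security_status command. The sketch below is only an approximation inferred from
those assertions -- it is not the hbase_regionserver.py implementation, and the
function name and argument shapes are illustrative.

# Approximation of the outcome the assertions above expect, inferred from the
# test alone (not copied from the Ambari Script code).
def expected_security_status(security_enabled, security_params, result_issues):
    if not security_enabled or result_issues:
        # hbase_default.json run, or validate_security_config reported problems
        return {"securityState": "UNSECURED"}
    site = security_params.get('hbase-site', {})
    if not site.get('hbase.regionserver.keytab.file') or \
       not site.get('hbase.regionserver.kerberos.principal'):
        # message text mirrors the assertion above, including its wording
        return {"securityIssuesFound": "Keytab file or principal are not set property."}
    # keytab and principal present, and kinit succeeds in the happy-path run
    return {"securityState": "SECURED_KERBEROS"}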

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/test/python/stacks/2.3/HBASE/test_hbase_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/HBASE/test_hbase_service_check.py b/ambari-server/src/test/python/stacks/2.3/HBASE/test_hbase_service_check.py
new file mode 100644
index 0000000..ca6a0d8
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.3/HBASE/test_hbase_service_check.py
@@ -0,0 +1,131 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, call, patch
+from stacks.utils.RMFTestCase import *
+from unittest import skip
+import datetime
+import resource_management.libraries.functions
+
+@patch("platform.linux_distribution", new = MagicMock(return_value="Linux"))
+@patch.object(resource_management.libraries.functions, "get_unique_id_and_date", new = MagicMock(return_value=''))
+class TestServiceCheck(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "HBASE/1.1.0.2.3/package"
+  STACK_VERSION = "2.3"
+
+  def test_service_check_default(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py",
+                        classname="HbaseServiceCheck",
+                        command="service_check",
+                        config_file="hbase_default.json",
+                        hdp_stack_version = self.STACK_VERSION,
+                        target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+    self.assertResourceCalled('File', '/tmp/hbaseSmokeVerify.sh',
+      content = StaticFile('hbaseSmokeVerify.sh'),
+      mode = 0755,
+    )
+    self.assertResourceCalled('File', '/tmp/hbase-smoke.sh',
+      content = Template('hbase-smoke.sh.j2'),
+      mode = 0755,
+    )
+    self.assertResourceCalled('Execute', ' /usr/hdp/current/hbase-regionserver/bin/hbase --config /etc/hbase/conf shell /tmp/hbase-smoke.sh',
+      logoutput = True,
+      tries = 3,
+      user = 'ambari-qa',
+      try_sleep = 5,
+    )
+    self.assertResourceCalled('Execute', ' /tmp/hbaseSmokeVerify.sh /etc/hbase/conf  /usr/hdp/current/hbase-regionserver/bin/hbase',
+      logoutput = True,
+      tries = 3,
+      user = 'ambari-qa',
+      try_sleep = 5,
+    )
+    self.assertNoMoreResources()
+    
+    
+  def test_service_check_secured(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py",
+                        classname="HbaseServiceCheck",
+                        command="service_check",
+                        config_file="hbase_secure.json",
+                        hdp_stack_version = self.STACK_VERSION,
+                        target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+    self.assertResourceCalled('File', '/tmp/hbaseSmokeVerify.sh',
+      content = StaticFile('hbaseSmokeVerify.sh'),
+      mode = 0755,
+    )
+    self.assertResourceCalled('File', '/tmp/hbase-smoke.sh',
+      content = Template('hbase-smoke.sh.j2'),
+      mode = 0755,
+    )
+    self.assertResourceCalled('File', '/tmp/hbase_grant_permissions.sh',
+      content = Template('hbase_grant_permissions.j2'),
+      owner = 'hbase',
+      group = 'hadoop',
+      mode = 0644,
+    )
+    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hbase.headless.keytab hbase@EXAMPLE.COM; /usr/hdp/current/hbase-regionserver/bin/hbase shell /tmp/hbase_grant_permissions.sh',
+      user = 'hbase',
+    )
+    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa@EXAMPLE.COM; /usr/hdp/current/hbase-regionserver/bin/hbase --config /etc/hbase/conf shell /tmp/hbase-smoke.sh',
+      logoutput = True,
+      tries = 3,
+      try_sleep = 5,
+      user = 'ambari-qa'
+    )
+    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa@EXAMPLE.COM; /tmp/hbaseSmokeVerify.sh /etc/hbase/conf  /usr/hdp/current/hbase-regionserver/bin/hbase',
+      logoutput = True,
+      tries = 3,
+      try_sleep = 5,
+      user = 'ambari-qa'
+    )
+    self.assertNoMoreResources()
+
+  @skip("there's nothing to upgrade to yet")    
+  def test_service_check_24(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py",
+                        classname="HbaseServiceCheck",
+                        command="service_check",
+                        config_file="hbase-check-2.4.json",
+                        hdp_stack_version = self.STACK_VERSION,
+                        target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+    self.assertResourceCalled('File', '/tmp/hbaseSmokeVerify.sh',
+      content = StaticFile('hbaseSmokeVerify.sh'),
+      mode = 0755,
+    )
+    self.assertResourceCalled('File', '/tmp/hbase-smoke.sh',
+      content = Template('hbase-smoke.sh.j2'),
+      mode = 0755,
+    )
+    self.assertResourceCalled('Execute', ' /usr/hdp/current/hbase-client/bin/hbase --config /etc/hbase/conf shell /tmp/hbase-smoke.sh',
+      logoutput = True,
+      tries = 3,
+      user = 'ambari-qa',
+      try_sleep = 5,
+    )
+    self.assertResourceCalled('Execute', ' /tmp/hbaseSmokeVerify.sh /etc/hbase/conf  /usr/hdp/current/hbase-client/bin/hbase',
+      logoutput = True,
+      tries = 3,
+      user = 'ambari-qa',
+      try_sleep = 5,
+    )
+    self.assertNoMoreResources()

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/test/python/stacks/2.3/HBASE/test_phoenix_queryserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/HBASE/test_phoenix_queryserver.py b/ambari-server/src/test/python/stacks/2.3/HBASE/test_phoenix_queryserver.py
new file mode 100644
index 0000000..32a703d
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.3/HBASE/test_phoenix_queryserver.py
@@ -0,0 +1,160 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, patch
+from stacks.utils.RMFTestCase import *
+from unittest import skip
+
+@patch("platform.linux_distribution", new = MagicMock(return_value="Linux"))
+@patch("os.path.exists", new = MagicMock(return_value=True))
+class TestPhoenixQueryServer(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "HBASE/1.1.0.2.3/package"
+  STACK_VERSION = "2.3"
+
+  def test_configure_default(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/phoenix_queryserver.py",
+                   classname = "PhoenixQueryServer",
+                   command = "configure",
+                   config_file="hbase_default.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+    
+    self.assertNoMoreResources()
+    
+  def test_start_default(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/phoenix_queryserver.py",
+                   classname = "PhoenixQueryServer",
+                   command = "start",
+                   config_file="hbase_default.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+
+  def test_stop_default(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/phoenix_queryserver.py",
+                   classname = "PhoenixQueryServer",
+                   command = "stop",
+                   config_file="hbase_default.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+    
+    self.assertResourceCalled('Execute', '/usr/hdp/current/phoenix-server/bin/queryserver.py stop',
+        on_timeout = '! ( ls /var/run/hbase/phoenix-hbase-server.pid >/dev/null 2>&1 && ps -p `cat /var/run/hbase/phoenix-hbase-server.pid` >/dev/null 2>&1 ) || ambari-sudo.sh -H -E kill -9 `cat /var/run/hbase/phoenix-hbase-server.pid`',
+        timeout = 30,
+    )
+    
+    self.assertResourceCalled('Execute', 'rm -f /var/run/hbase/phoenix-hbase-server.pid',
+    )
+    self.assertNoMoreResources()
+    
+  def test_configure_secured(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/phoenix_queryserver.py",
+                   classname = "PhoenixQueryServer",
+                   command = "configure",
+                   config_file="hbase_secure.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+    
+    self.assertNoMoreResources()
+    
+  def test_start_secured(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/phoenix_queryserver.py",
+                   classname = "PhoenixQueryServer",
+                   command = "start",
+                   config_file="hbase_secure.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+
+  def test_stop_secured(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/phoenix_queryserver.py",
+                   classname = "PhoenixQueryServer",
+                   command = "stop",
+                   config_file="hbase_secure.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+
+    self.assertResourceCalled('Execute', '/usr/hdp/current/phoenix-server/bin/queryserver.py stop',
+        on_timeout = '! ( ls /var/run/hbase/phoenix-hbase-server.pid >/dev/null 2>&1 && ps -p `cat /var/run/hbase/phoenix-hbase-server.pid` >/dev/null 2>&1 ) || ambari-sudo.sh -H -E kill -9 `cat /var/run/hbase/phoenix-hbase-server.pid`',
+        timeout = 30,
+    )
+    
+    self.assertResourceCalled('Execute', 'rm -f /var/run/hbase/phoenix-hbase-server.pid',
+    )
+    self.assertNoMoreResources()
+
+  @skip("there's nothing to upgrade to yet")    
+  def test_start_default_24(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/phoenix_queryserver.py",
+                   classname = "PhoenixQueryServer",
+                   command = "start",
+                   config_file="hbase-rs-2.4.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES)
+    
+    self.assertResourceCalled('Directory', '/etc/hbase',
+      mode = 0755)
+
+    self.assertResourceCalled('Directory', '/etc/hbase/conf',
+      owner = 'hbase',
+      group = 'hadoop',
+      recursive = True)
+
+    self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hbase-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hbase-site'])
+    self.assertResourceCalled('XmlConfig', 'core-site.xml',
+                              owner = 'hbase',
+                              group = 'hadoop',
+                              conf_dir = '/etc/hbase/conf',
+                              configurations = self.getConfig()['configurations']['core-site'],
+                              configuration_attributes = self.getConfig()['configuration_attributes']['core-site']
+    )
+    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-env.sh',
+      owner = 'hbase',
+      content = InlineTemplate(self.getConfig()['configurations']['hbase-env']['content']))
+
+    self.assertResourceCalled('Directory', '/var/run/hbase',
+      owner = 'hbase',
+      recursive = True)
+
+    self.assertResourceCalled('Directory', '/var/log/hbase',
+      owner = 'hbase',
+      recursive = True)
+
+    self.assertResourceCalled('File',
+                              '/usr/lib/phoenix/bin/log4j.properties',
+                              mode=0644,
+                              group='hadoop',
+                              owner='hbase',
+                              content='log4jproperties\nline2')
+
+
+    self.assertResourceCalled('Execute', '/usr/hdp/current/phoenix-server/bin/queryserver.py start',
+      not_if = 'ls /var/run/hbase/phoenix-hbase-server.pid >/dev/null 2>&1 && ps -p `cat /var/run/hbase/phoenix-hbase-server.pid` >/dev/null 2>&1',
+      user = 'hbase')
+
+    self.assertNoMoreResources()


[8/8] ambari git commit: AMBARI-9993. Add support for management of Phoenix Query Server to HDP Stack (Nick Dimiduk via alejandro)

Posted by al...@apache.org.
AMBARI-9993. Add support for management of Phoenix Query Server to HDP Stack (Nick Dimiduk via alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/54647547
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/54647547
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/54647547

Branch: refs/heads/trunk
Commit: 5464754717b0be739df6171ee5b6119548ec7bd9
Parents: b589692
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Wed Apr 15 15:52:27 2015 -0700
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Wed Apr 15 15:52:27 2015 -0700

----------------------------------------------------------------------
 .../common-services/HBASE/1.1.0.2.3/alerts.json |   157 +
 .../HBASE/1.1.0.2.3/configuration/hbase-env.xml |   155 +
 .../1.1.0.2.3/configuration/hbase-log4j.xml     |   143 +
 .../1.1.0.2.3/configuration/hbase-policy.xml    |    53 +
 .../1.1.0.2.3/configuration/hbase-site.xml      |   502 +
 .../HBASE/1.1.0.2.3/kerberos.json               |   105 +
 .../HBASE/1.1.0.2.3/metainfo.xml                |   158 +
 .../HBASE/1.1.0.2.3/metrics.json                | 13549 +++++++++++++++++
 .../1.1.0.2.3/package/files/draining_servers.rb |   164 +
 .../1.1.0.2.3/package/files/hbaseSmokeVerify.sh |    34 +
 .../HBASE/1.1.0.2.3/package/scripts/__init__.py |    19 +
 .../1.1.0.2.3/package/scripts/functions.py      |    40 +
 .../HBASE/1.1.0.2.3/package/scripts/hbase.py    |   176 +
 .../1.1.0.2.3/package/scripts/hbase_client.py   |    66 +
 .../package/scripts/hbase_decommission.py       |    93 +
 .../1.1.0.2.3/package/scripts/hbase_master.py   |   148 +
 .../package/scripts/hbase_regionserver.py       |   156 +
 .../1.1.0.2.3/package/scripts/hbase_service.py  |    51 +
 .../1.1.0.2.3/package/scripts/hbase_upgrade.py  |    37 +
 .../HBASE/1.1.0.2.3/package/scripts/params.py   |    25 +
 .../1.1.0.2.3/package/scripts/params_linux.py   |   249 +
 .../1.1.0.2.3/package/scripts/params_windows.py |    37 +
 .../package/scripts/phoenix_queryserver.py      |    55 +
 .../package/scripts/phoenix_service.py          |    44 +
 .../1.1.0.2.3/package/scripts/service_check.py  |    97 +
 .../package/scripts/setup_ranger_hbase.py       |   202 +
 .../1.1.0.2.3/package/scripts/status_params.py  |    41 +
 .../HBASE/1.1.0.2.3/package/scripts/upgrade.py  |    49 +
 ...-metrics2-hbase.properties-GANGLIA-MASTER.j2 |   105 +
 ...doop-metrics2-hbase.properties-GANGLIA-RS.j2 |   104 +
 .../package/templates/hbase-smoke.sh.j2         |    44 +
 .../package/templates/hbase_client_jaas.conf.j2 |    23 +
 .../templates/hbase_grant_permissions.j2        |    39 +
 .../package/templates/hbase_master_jaas.conf.j2 |    26 +
 .../templates/hbase_regionserver_jaas.conf.j2   |    26 +
 .../package/templates/regionservers.j2          |    20 +
 .../HBASE/1.1.0.2.3/widgets.json                |   192 +
 .../stacks/2.3/HBASE/test_hbase_client.py       |   213 +
 .../stacks/2.3/HBASE/test_hbase_master.py       |   678 +
 .../stacks/2.3/HBASE/test_hbase_regionserver.py |   601 +
 .../2.3/HBASE/test_hbase_service_check.py       |   131 +
 .../2.3/HBASE/test_phoenix_queryserver.py       |   160 +
 .../stacks/2.3/configs/default.hbasedecom.json  |   713 +
 .../stacks/2.3/configs/hbase_default.json       |   417 +
 .../python/stacks/2.3/configs/hbase_secure.json |   738 +
 45 files changed, 20835 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/alerts.json b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/alerts.json
new file mode 100644
index 0000000..d10897a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/alerts.json
@@ -0,0 +1,157 @@
+{
+  "HBASE": {
+    "service": [
+      {
+        "name": "hbase_regionserver_process_percent",
+        "label": "Percent RegionServers Available",
+        "description": "This service-level alert is triggered if the configured percentage of RegionServer processes cannot be determined to be up and listening on the network for the configured warning and critical thresholds. It aggregates the results of RegionServer process down checks.",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "AGGREGATE",
+          "alert_name": "hbase_regionserver_process",
+          "reporting": {
+            "ok": {
+              "text": "affected: [{1}], total: [{0}]"
+            },
+            "warning": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.1
+            },
+            "critical": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.3
+            }
+          }
+        }
+      }    
+    ],
+    "HBASE_MASTER": [
+      {
+        "name": "hbase_master_process",
+        "label": "HBase Master Process",
+        "description": "This alert is triggered if the HBase master processes cannot be confirmed to be up and listening on the network for the configured critical threshold, given in seconds.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "PORT",
+          "uri": "{{hbase-site/hbase.master.port}}",
+          "default_port": 60000,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      },
+      {
+        "name": "hbase_master_cpu",
+        "label": "HBase Maser CPU Utilization",
+        "description": "This host-level alert is triggered if CPU utilization of the HBase Master exceeds certain warning and critical thresholds. It checks the HBase Master JMX Servlet for the SystemCPULoad property. The threshold values are in percent.",
+        "interval": 5,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{hbase-site/hbase.master.info.port}}",
+            "default_port": 60010
+          },
+          "reporting": {
+            "ok": {
+              "text": "{1} CPU, load {0:.1%}"
+            },
+            "warning": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 200
+            },
+            "critical": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 250
+            },
+            "units" : "%"
+          },
+          "jmx": {
+            "property_list": [
+              "java.lang:type=OperatingSystem/SystemCpuLoad",
+              "java.lang:type=OperatingSystem/AvailableProcessors"
+            ],
+            "value": "{0} * 100"
+          }
+        }
+      },
+      {
+        "name": "regionservers_health_summary",
+        "label": "RegionServers Health Summary",
+        "description": "This service-level alert is triggered if there are unhealthy RegionServers",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{hbase-site/hbase.master.info.port}}",
+            "default_port": 60010
+          },
+          "reporting": {
+            "ok": {
+              "text": "All {1} RegionServer(s) are alive"
+            },
+            "warning": {
+              "text": "Dead RegionServer(s): {0} out of {1}",
+              "value": 1
+            },
+            "critical": {
+              "text": "Dead RegionServer(s): {0} out of {1}",
+              "value": 1
+            },
+            "units" : "RegionServer(s)"
+          },
+          "jmx": {
+            "property_list": [
+              "Hadoop:service=HBase,name=Master,sub=Server/numDeadRegionServers",
+              "Hadoop:service=HBase,name=Master,sub=Server/numRegionServers"
+            ],
+            "value": "{0}"
+          }
+        }
+      }
+    ],
+    "HBASE_REGIONSERVER": [
+      {
+        "name": "hbase_regionserver_process",
+        "label": "HBase RegionServer Process",
+        "description": "This host-level alert is triggered if the RegionServer processes cannot be confirmed to be up and listening on the network for the configured critical threshold, given in seconds.",
+        "interval": 1,
+        "scope": "HOST",
+        "source": {
+          "type": "PORT",
+          "uri": "{{hbase-site/hbase.regionserver.info.port}}",
+          "default_port": 60030,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
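
For the METRIC-type alerts above (hbase_master_cpu, regionservers_health_summary),
the "jmx" block lists the properties to read and a "value" expression that combines
them; the result is then compared against the warning/critical thresholds under
"reporting". The sketch below only mirrors the hbase_master_cpu definition shown
here; the comparison direction and how AvailableProcessors feeds "{1}" in the
report text are details of Ambari's alert framework that are assumed, not shown.

def evaluate_hbase_master_cpu(system_cpu_load, warning=200, critical=250):
    # property_list[0] is java.lang:type=OperatingSystem/SystemCpuLoad;
    # the "value" expression in the JSON above is "{0} * 100".
    computed = system_cpu_load * 100
    if computed >= critical:
        return "CRITICAL"
    if computed >= warning:
        return "WARNING"
    return "OK"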

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/configuration/hbase-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/configuration/hbase-env.xml b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/configuration/hbase-env.xml
new file mode 100644
index 0000000..abef070
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/configuration/hbase-env.xml
@@ -0,0 +1,155 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hbase_log_dir</name>
+    <value>/var/log/hbase</value>
+    <description>Log Directories for HBase.</description>
+  </property>
+  <property>
+    <name>hbase_pid_dir</name>
+    <value>/var/run/hbase</value>
+    <description>Pid Directory for HBase.</description>
+  </property>
+  <property>
+    <name>hbase_regionserver_heapsize</name>
+    <value>4096</value>
+    <description>HBase RegionServer Heap Size.</description>
+    <display-name>HBase RegionServer Maximum Memory</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>1024</minimum>
+      <maximum>6554</maximum>
+      <unit>MB</unit>
+      <increment-step>250</increment-step>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase_regionserver_xmn_max</name>
+    <value>512</value>
+    <description>
+Sets the upper bound on the HBase RegionServers' young generation size.
+This value is applied when the young generation size (-Xmn), calculated from the max heap size (hbase_regionserver_heapsize)
+and the -Xmn ratio (hbase_regionserver_xmn_ratio), exceeds this upper bound.
+    </description>
+  </property>
+  <property>
+    <name>hbase_regionserver_xmn_ratio</name>
+    <value>0.2</value>
+    <description>Percentage of max heap size (-Xmx) which is used for the young generation heap (-Xmn).</description>
+  </property>
+  <property>
+    <name>hbase_master_heapsize</name>
+    <value>4096</value>
+    <description>HBase Master Heap Size</description>
+    <display-name>HBase Master Maximum Memory</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>1024</minimum>
+      <maximum>16384</maximum>
+      <unit>MB</unit>
+      <increment-step>250</increment-step>
+    </value-attributes>
+  </property>
+   <property>
+    <name>hbase_user</name>
+    <value>hbase</value>
+    <property-type>USER</property-type>
+    <description>HBase User Name.</description>
+  </property>
+
+  <!-- hbase-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for hbase-env.sh file</description>
+    <value>
+# Set environment variables here.
+
+# The java implementation to use. Java 1.6 required.
+export JAVA_HOME={{java64_home}}
+
+# HBase Configuration directory
+export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}
+
+# Extra Java CLASSPATH elements. Optional.
+export HBASE_CLASSPATH=${HBASE_CLASSPATH}
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+# export HBASE_HEAPSIZE=1000
+
+# Extra Java runtime options.
+# Below are what we set by default. May only work with SUN JVM.
+# For more on why as well as other possible settings,
+# see http://wiki.apache.org/hadoop/PerformanceTuning
+export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`"
+# Uncomment below to enable java garbage collection logging.
+# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
+
+# Uncomment and adjust to enable JMX exporting
+# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
+# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+#
+# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
+# If you want to configure BucketCache, specify '-XX:MaxDirectMemorySize=' with the proper direct memory size
+# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
+# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
+
+# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
+export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
+
+# Extra ssh options. Empty by default.
+# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
+
+# Where log files are stored. $HBASE_HOME/logs by default.
+export HBASE_LOG_DIR={{log_dir}}
+
+# A string representing this instance of hbase. $USER by default.
+# export HBASE_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+# export HBASE_NICENESS=10
+
+# The directory where pid files are stored. /tmp by default.
+export HBASE_PID_DIR={{pid_dir}}
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HBASE_SLAVE_SLEEP=0.1
+
+# Tell HBase whether it should manage its own instance of ZooKeeper or not.
+export HBASE_MANAGES_ZK=false
+
+{% if security_enabled %}
+export HBASE_OPTS="$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}}"
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}}"
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} -Djava.security.auth.login.config={{regionserver_jaas_config_file}}"
+{% else %}
+export HBASE_OPTS="$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log"
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xmx{{master_heapsize}}"
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}"
+{% endif %}
+    </value>
+  </property>
+
+</configuration>
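
The hbase_regionserver_xmn_max and hbase_regionserver_xmn_ratio descriptions above
imply a capped young-generation size: heap size times the ratio, bounded by the
maximum. Below is a minimal sketch of that calculation, assuming the cap is a simple
min(); the helper name and rounding are illustrative, and the real computation (and
the exact -Xmn string rendered into HBASE_REGIONSERVER_OPTS) lives in the stack's
params/functions scripts.

def regionserver_xmn_size_mb(heapsize_mb, xmn_ratio, xmn_max_mb):
    # e.g. with the defaults above: min(round(4096 * 0.2), 512) = min(819, 512) = 512
    return int(min(round(heapsize_mb * xmn_ratio), xmn_max_mb))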

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/configuration/hbase-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/configuration/hbase-log4j.xml b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/configuration/hbase-log4j.xml
new file mode 100644
index 0000000..57b3845
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/configuration/hbase-log4j.xml
@@ -0,0 +1,143 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <description>Custom log4j.properties</description>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Define some default values that can be overridden by system properties
+hbase.root.logger=INFO,console
+hbase.security.logger=INFO,console
+hbase.log.dir=.
+hbase.log.file=hbase.log
+
+# Define the root logger to the system property "hbase.root.logger".
+log4j.rootLogger=${hbase.root.logger}
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}
+
+# Roll over at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
+
+# Rolling File Appender properties
+hbase.log.maxfilesize=256MB
+hbase.log.maxbackupindex=20
+
+# Rolling File Appender
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hbase.log.dir}/${hbase.log.file}
+
+log4j.appender.RFA.MaxFileSize=${hbase.log.maxfilesize}
+log4j.appender.RFA.MaxBackupIndex=${hbase.log.maxbackupindex}
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
+
+#
+# Security audit appender
+#
+hbase.security.log.file=SecurityAuth.audit
+hbase.security.log.maxfilesize=256MB
+hbase.security.log.maxbackupindex=20
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hbase.log.dir}/${hbase.security.log.file}
+log4j.appender.RFAS.MaxFileSize=${hbase.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hbase.security.log.maxbackupindex}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.category.SecurityLogger=${hbase.security.logger}
+log4j.additivity.SecurityLogger=false
+#log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.access.AccessController=TRACE
+
+#
+# Null Appender
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
+
+# Custom Logging levels
+
+log4j.logger.org.apache.zookeeper=INFO
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.hbase=DEBUG
+# Make these two classes INFO-level. Make them DEBUG to see more zk debug.
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO
+#log4j.logger.org.apache.hadoop.dfs=DEBUG
+# Set this class to log INFO only, otherwise it's OTT
+# Enable this to get detailed connection error/retry logging.
+# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE
+
+
+# Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)
+#log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG
+
+# Uncomment the below if you want to remove logging of client region caching
+# and scan of .META. messages
+# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO
+# log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO
+
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/configuration/hbase-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/configuration/hbase-policy.xml b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/configuration/hbase-policy.xml
new file mode 100644
index 0000000..2f12801
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/configuration/hbase-policy.xml
@@ -0,0 +1,53 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="true">
+  <property>
+    <name>security.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HRegionInterface protocol implementations (i.e.
+    clients talking to HRegionServers).
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank, e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.admin.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HMasterInterface protocol implementation (i.e.
+    clients talking to HMaster for admin operations).
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank, e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.masterregion.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HMasterRegionInterface protocol implementations
+    (for HRegionServers communicating with HMaster).
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank, e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+</configuration>
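
The three ACLs above share one format: a comma-separated list of users and a comma-separated list of groups, separated from each other by a single blank, with "*" meaning everyone. A minimal Python sketch of that rule as described (illustrative only; not Hadoop's or Ambari's actual parser):

    def parse_acl(acl):
        """Parse an ACL like "alice,bob users,wheel"; "*" allows all users."""
        acl = acl.strip()
        if acl == "*":
            return {"all": True, "users": [], "groups": []}
        parts = acl.split(" ", 1)
        users = [u for u in parts[0].split(",") if u]
        groups = [g for g in parts[1].split(",") if g] if len(parts) > 1 else []
        return {"all": False, "users": users, "groups": groups}

    print(parse_acl("*"))
    print(parse_acl("alice,bob users,wheel"))
    # {'all': False, 'users': ['alice', 'bob'], 'groups': ['users', 'wheel']}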

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/configuration/hbase-site.xml b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/configuration/hbase-site.xml
new file mode 100644
index 0000000..eda02ca
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/configuration/hbase-site.xml
@@ -0,0 +1,502 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <property>
+    <name>hbase.rootdir</name>
+    <value>hdfs://localhost:8020/apps/hbase/data</value>
+    <description>The directory shared by region servers and into
+    which HBase persists.  The URL should be 'fully-qualified'
+    to include the filesystem scheme.  For example, to specify the
+    HDFS directory '/hbase' where the HDFS instance's namenode is
+    running at namenode.example.org on port 9000, set this value to:
+    hdfs://namenode.example.org:9000/hbase.  By default HBase writes
+    into /tmp.  Change this configuration, or all data will be lost
+    on machine restart.
+    </description>
+  </property>
+  <property>
+    <name>hbase.cluster.distributed</name>
+    <value>true</value>
+    <description>The mode the cluster will be in. Possible values are
+      false for standalone mode and true for distributed mode.  If
+      false, startup will run all HBase and ZooKeeper daemons together
+      in the one JVM.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.port</name>
+    <value>60000</value>
+    <description>The port the HBase Master should bind to.</description>
+  </property>
+  <property>
+    <name>hbase.tmp.dir</name>
+    <value>/hadoop/hbase</value>
+    <description>Temporary directory on the local filesystem.
+    Change this setting to point to a location more permanent
+    than '/tmp' (The '/tmp' directory is often cleared on
+    machine restart).
+    </description>
+  </property>
+  <property>
+    <name>hbase.local.dir</name>
+    <value>${hbase.tmp.dir}/local</value>
+    <description>Directory on the local filesystem to be used as local storage.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.bindAddress</name>
+    <value>0.0.0.0</value>
+    <description>The bind address for the HBase Master web UI
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.port</name>
+    <value>60010</value>
+    <description>The port for the HBase Master web UI.</description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port</name>
+    <value>60030</value>
+    <description>The port for the HBase RegionServer web UI.</description>
+  </property>
+  <property>
+    <name>hbase.regionserver.global.memstore.upperLimit</name>
+    <value>0.4</value>
+    <description>Maximum size of all memstores in a region server before new
+      updates are blocked and flushes are forced. Defaults to 40% of heap
+    </description>
+    <display-name>% of RegionServer Allocated to Write Buffers</display-name>
+    <value-attributes>
+      <type>float</type>
+      <minimum>0</minimum>
+      <maximum>0.8</maximum>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.regionserver.handler.count</name>
+    <value>30</value>
+    <description>Count of RPC Listener instances spun up on RegionServers.
+    Same property is used by the Master for count of master handlers.
+    Default is 10.
+    </description>
+    <display-name>Number of Handlers per RegionServer</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>5</minimum>
+      <maximum>240</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.hregion.majorcompaction</name>
+    <value>604800000</value>
+    <description>The time (in milliseconds) between 'major' compactions of all
+    HStoreFiles in a region.  Default: 1 day.
+    Set to 0 to disable automated major compactions.
+    </description>
+    <display-name>Major Compaction Interval</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>2592000000</maximum>
+      <unit>milliseconds</unit>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hbase.regionserver.global.memstore.lowerLimit</name>
+    <value>0.38</value>
+    <description>When memstores are being forced to flush to make room in
+      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
+      Setting this value equal to hbase.regionserver.global.memstore.upperLimit causes
+      the minimum possible flushing to occur when updates are blocked due to
+      memstore limiting.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.block.multiplier</name>
+    <value>4</value>
+    <description>Block updates if the memstore reaches hbase.hregion.memstore.block.multiplier
+    times hbase.hregion.flush.size bytes.  Useful for preventing
+    runaway memstore growth during spikes in update traffic.  Without an
+    upper bound, the memstore fills such that when it flushes, the
+    resultant flush files take a long time to compact or split, or,
+    worse, we OOME.
+    </description>
+    <display-name>Per-Column Family Memstore Block Multiplier</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>2</value>
+        </entry>
+        <entry>
+          <value>4</value>
+        </entry>
+        <entry>
+          <value>8</value>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.flush.size</name>
+    <value>134217728</value>
+    <description>
+    Memstore will be flushed to disk if size of the memstore
+    exceeds this number of bytes.  Value is checked by a thread that runs
+    every hbase.server.thread.wakefrequency.
+    </description>
+    <display-name>Per-Column Family Memstore Flush Size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>33554432</minimum>
+      <maximum>268435456</maximum>
+      <increment-step>1048576</increment-step>
+      <unit>B</unit>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.mslab.enabled</name>
+    <value>true</value>
+    <description>
+      Enables the MemStore-Local Allocation Buffer,
+      a feature which works to prevent heap fragmentation under
+      heavy write loads. This can reduce the frequency of stop-the-world
+      GC pauses on large heaps.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.max.filesize</name>
+    <value>1073741824</value>
+    <description>
+    Maximum HStoreFile size. If any one of a column family's HStoreFiles has
+    grown to exceed this value, the hosting HRegion is split in two.
+    Default: 1G.
+    </description>
+    <display-name>Maximum Region File Size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>1073741824</minimum>
+      <maximum>10737418240</maximum>
+      <unit>B</unit>
+      <increment-step>268435456</increment-step>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.client.scanner.caching</name>
+    <value>100</value>
+    <description>Number of rows that will be fetched when calling next
+    on a scanner if it is not served from (local, client) memory. Higher
+    caching values will enable faster scanners but will eat up more memory,
+    and some calls of next may take longer when the cache is empty.
+    Do not set this value such that the time between invocations is greater
+    than the scanner timeout, i.e. hbase.regionserver.lease.period.
+    </description>
+    <display-name>Number of Fetched Rows when Scanning from Disk</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>100</minimum>
+      <maximum>10000</maximum>
+      <increment-step>100</increment-step>
+    </value-attributes>
+  </property>
+  <property>
+    <name>zookeeper.session.timeout</name>
+    <value>90000</value>
+    <description>ZooKeeper session timeout.
+      HBase passes this to the zk quorum as suggested maximum time for a
+      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
+      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
+      "The client sends a requested timeout, the server responds with the
+      timeout that it can give the client." In milliseconds.
+    </description>
+    <display-name>Zookeeper Session Timeout</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>10000</minimum>
+      <maximum>180000</maximum>
+      <unit>milliseconds</unit>
+      <increment-step>10000</increment-step>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.client.keyvalue.maxsize</name>
+    <value>1048576</value>
+    <description>Specifies the combined maximum allowed size of a KeyValue
+    instance. This is to set an upper boundary for a single entry saved in a
+    storage file. Since a single entry cannot be split, this helps avoid a region
+    that cannot be split any further because its data is too large. It seems wise
+    to set this to a fraction of the maximum region size. Setting it to zero
+    or less disables the check.
+    </description>
+    <display-name>Maximum Record Size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>1048576</minimum>
+      <maximum>31457280</maximum>
+      <unit>B</unit>
+      <increment-step>262144</increment-step>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.hstore.compactionThreshold</name>
+    <value>3</value>
+    <description>
+    If any one HStore contains more than this number of HStoreFiles
+    (one HStoreFile is written per flush of memstore), a compaction
+    is run to rewrite all HStoreFiles as one.  Larger numbers
+    put off compaction, but when it runs it takes longer to complete.
+    </description>
+    <display-name>Maximum Files in a Store before Compaction</display-name>
+    <value-attributes>
+      <type>int</type>
+      <entries>
+        <entry>
+          <value>2</value>
+        </entry>
+        <entry>
+          <value>3</value>
+        </entry>
+        <entry>
+          <value>4</value>
+        </entry>
+      </entries>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.hstore.flush.retries.number</name>
+    <value>120</value>
+    <description>
+    The number of times the region flush operation will be retried.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.hstore.blockingStoreFiles</name>
+    <value>10</value>
+    <description>
+    If any one Store contains more than this number of StoreFiles
+    (one StoreFile is written per flush of MemStore), updates are
+    blocked for this HRegion until a compaction is completed, or
+    until hbase.hstore.blockingWaitTime has been exceeded.
+    </description>
+  </property>
+  <property>
+    <name>hfile.block.cache.size</name>
+    <value>0.40</value>
+    <description>
+        Percentage of maximum heap (-Xmx setting) to allocate to block cache
+        used by HFile/StoreFile. A value of 0.25 means allocate 25% of the heap.
+        Set to 0 to disable, but that is not recommended.
+    </description>
+    <display-name>% of RegionServer Allocated to Read Buffers</display-name>
+    <value-attributes>
+      <type>float</type>
+      <minimum>0</minimum>
+      <maximum>0.8</maximum>
+    </value-attributes>
+  </property>
+
+  <!-- Additional configuration specific to HBase security -->
+  <property>
+    <name>hbase.superuser</name>
+    <value>hbase</value>
+    <description>List of users or groups (comma-separated), who are allowed
+    full privileges, regardless of stored ACLs, across the cluster.
+    Only used when HBase security is enabled.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.security.authentication</name>
+    <value>simple</value>
+    <description>Controls whether or not secure authentication is enabled for HBase. Possible values are 'simple'
+      (no authentication), and 'kerberos'.
+    </description>
+    <display-name>Enable Authentication</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+        </entry>
+        <entry>
+          <value>false</value>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hbase.security.authorization</name>
+    <value>false</value>
+    <description>Enables HBase authorization. Set the value of this property to false to disable HBase authorization.
+    </description>
+    <display-name>Enable Authorization</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+        </entry>
+        <entry>
+          <value>false</value>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hbase.coprocessor.region.classes</name>
+    <value></value>
+    <description>A comma-separated list of Coprocessors that are loaded by
+    default on all tables. For any override coprocessor method, these classes
+    will be called in order. After implementing your own Coprocessor, just put
+    it in HBase's classpath and add the fully qualified class name here.
+    A coprocessor can also be loaded on demand per table via HTableDescriptor.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.coprocessor.master.classes</name>
+    <value></value>
+    <description>A comma-separated list of
+      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
+      loaded by default on the active HMaster process. For any implemented
+      coprocessor methods, the listed classes will be called in order. After
+      implementing your own MasterObserver, just put it in HBase's classpath
+      and add the fully qualified class name here.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>2181</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The port at which the clients will connect.
+    </description>
+  </property>
+
+  <!--
+  The following three properties are used together to create the list of
+  host:peer_port:leader_port quorum servers for ZooKeeper.
+  -->
+  <property>
+    <name>hbase.zookeeper.quorum</name>
+    <value>localhost</value>
+    <description>Comma separated list of servers in the ZooKeeper Quorum.
+    For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
+    By default this is set to localhost for local and pseudo-distributed modes
+    of operation. For a fully-distributed setup, this should be set to a full
+    list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
+    this is the list of servers which we will start/stop ZooKeeper on.
+    </description>
+  </property>
+  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
+
+  <property>
+    <name>hbase.zookeeper.useMulti</name>
+    <value>true</value>
+    <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
+    This allows certain ZooKeeper operations to complete more quickly and prevents some issues
+    with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).
+    IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
+    and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
+    not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
+    </description>
+  </property>
+  <property>
+    <name>zookeeper.znode.parent</name>
+    <value>/hbase-unsecure</value>
+    <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
+      files that are configured with a relative path will go under this node.
+      By default, all of HBase's ZooKeeper file path are configured with a
+      relative path, so they will all go under this directory unless changed.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.retries.number</name>
+    <value>35</value>
+    <description>Maximum retries.  Used as maximum for all retryable
+    operations such as the getting of a cell's value, starting a row update,
+    etc.  Retry interval is a rough function based on hbase.client.pause.  At
+    first we retry at this interval but then with backoff, we pretty quickly reach
+    retrying every ten seconds.  See HConstants#RETRY_BACKOFF for how the backoff
+    ramps up.  Change this setting and hbase.client.pause to suit your workload.</description>
+    <display-name>Maximum Client Retries</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>5</minimum>
+      <maximum>50</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.rpc.timeout</name>
+    <value>60000</value>
+    <description>This is for the RPC layer to define how long an HBase client application
+        waits for a remote call to time out. It uses pings to check connections
+        but will eventually throw a TimeoutException.</description>
+    <display-name>HBase RPC Timeout</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>30000</minimum>
+      <maximum>300000</maximum>
+      <unit>milliseconds</unit>
+      <increment-step>10000</increment-step>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.defaults.for.version.skip</name>
+    <value>true</value>
+    <description>Disables version verification.</description>
+  </property>
+  <property>
+    <name>phoenix.query.timeoutMs</name>
+    <value>60000</value>
+    <description>Number of milliseconds after which a query will time out on the client.</description>
+    <display-name>Phoenix Query Timeout</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>30000</minimum>
+      <maximum>300000</maximum>
+      <unit>milliseconds</unit>
+      <increment-step>10000</increment-step>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>dfs.domain.socket.path</name>
+    <value>/var/lib/hadoop-hdfs/dn_socket</value>
+    <description>Path to domain socket.</description>
+  </property>
+
+  <property>
+    <name>hbase.rpc.protection</name>
+    <value>authentication</value>
+  </property>
+
+</configuration>
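
Several of the properties above are fractions of the RegionServer heap (the memstore upper/lower limits and hfile.block.cache.size), which is why their sliders are capped at 0.8. A worked example in Python, assuming the 1024m RegionServer heap used in this patch's test configs; the numbers are illustrative only:

    heap_mb = 1024          # hbase_regionserver_heapsize from the test configs

    memstore_upper = 0.40   # hbase.regionserver.global.memstore.upperLimit
    memstore_lower = 0.38   # hbase.regionserver.global.memstore.lowerLimit
    block_cache    = 0.40   # hfile.block.cache.size

    print("writes blocked when all memstores reach ~%d MB" % (heap_mb * memstore_upper))  # ~409 MB
    print("forced flushing stops at ~%d MB" % (heap_mb * memstore_lower))                 # ~389 MB
    print("block cache (read buffers) gets ~%d MB" % (heap_mb * block_cache))             # ~409 MB

    # upperLimit + block cache add up to 0.8 of heap here, leaving the
    # remaining ~20% of heap for indexes, RPC buffers and GC headroom.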

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/kerberos.json b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/kerberos.json
new file mode 100644
index 0000000..67664a9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/kerberos.json
@@ -0,0 +1,105 @@
+{
+  "services": [
+    {
+      "name": "HBASE",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/hdfs"
+        },
+        {
+          "name": "hbase",
+          "principal": {
+            "value": "${hbase-env/hbase_user}@${realm}",
+            "type" : "user",
+            "configuration": "hbase-env/hbase_principal_name",
+            "local_username": "${hbase-env/hbase_user}"
+          },
+          "keytab": {
+            "file": "${keytab_dir}/hbase.headless.keytab",
+            "owner": {
+              "name": "${hbase-env/hbase_user}",
+              "access": "r"
+            },
+            "group": {
+              "name": "${cluster-env/user_group}",
+              "access": "r"
+            },
+            "configuration": "hbase-env/hbase_user_keytab"
+          }
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "hbase-site": {
+            "hbase.security.authentication": "kerberos",
+            "hbase.security.authorization": "true",
+            "zookeeper.znode.parent": "/hbase-secure",
+            "hbase.coprocessor.master.classes": "org.apache.hadoop.hbase.security.access.AccessController",
+            "hbase.coprocessor.region.classes": "org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint,org.apache.hadoop.hbase.security.access.AccessController",
+            "hbase.bulkload.staging.dir": "/apps/hbase/staging"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "HBASE_MASTER",
+          "identities": [
+            {
+              "name": "hbase_master_hbase",
+              "principal": {
+                "value": "hbase/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hbase-site/hbase.master.kerberos.principal",
+                "local_username": "${hbase-env/hbase_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hbase.service.keytab",
+                "owner": {
+                  "name": "${hbase-env/hbase_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hbase-site/hbase.master.keytab.file"
+              }
+            }
+          ]
+        },
+        {
+          "name": "HBASE_REGIONSERVER",
+          "identities": [
+            {
+              "name": "hbase_regionserver_hbase",
+              "principal": {
+                "value": "hbase/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hbase-site/hbase.regionserver.kerberos.principal",
+                "local_username": "${hbase-env/hbase_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hbase.service.keytab",
+                "owner": {
+                  "name": "${hbase-env/hbase_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hbase-site/hbase.regionserver.keytab.file"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
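
The descriptor above references other config types through ${config-type/property} placeholders, plus standalone variables such as ${realm} and ${keytab_dir}. A minimal Python sketch of how such references expand; this is not Ambari's resolver, and the realm and keytab directory values below are made up for illustration:

    import re

    configs = {
        "hbase-env": {"hbase_user": "hbase"},
        "cluster-env": {"user_group": "hadoop"},
    }
    variables = {"realm": "EXAMPLE.COM", "keytab_dir": "/etc/security/keytabs"}

    def resolve(template):
        def repl(match):
            ref = match.group(1)
            if "/" in ref:
                config_type, prop = ref.split("/", 1)
                return configs[config_type][prop]
            return variables[ref]
        return re.sub(r"\$\{([^}]+)\}", repl, template)

    print(resolve("${hbase-env/hbase_user}@${realm}"))
    # hbase@EXAMPLE.COM
    print(resolve("${keytab_dir}/hbase.headless.keytab"))
    # /etc/security/keytabs/hbase.headless.keytab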

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/metainfo.xml b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/metainfo.xml
new file mode 100644
index 0000000..b3ebe60
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/metainfo.xml
@@ -0,0 +1,158 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HBASE</name>
+      <displayName>HBase</displayName>
+      <comment>A non-relational distributed database, plus Phoenix, a high-performance SQL layer for low-latency applications.</comment>
+      <version>1.1.0.2.3</version>
+      <components>
+        <component>
+          <name>HBASE_MASTER</name>
+          <displayName>HBase Master</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>HBASE/HBASE_MASTER</co-locate>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/hbase_master.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/hbase_master.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+        </component>
+
+        <component>
+          <name>HBASE_REGIONSERVER</name>
+          <displayName>RegionServer</displayName>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/hbase_regionserver.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>HBASE_CLIENT</name>
+          <displayName>HBase Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/hbase_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>hbase-site.xml</fileName>
+              <dictionaryName>hbase-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hbase-env.sh</fileName>
+              <dictionaryName>hbase-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
+              <fileName>hbase-policy.xml</fileName>
+              <dictionaryName>hbase-policy</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>log4j.properties</fileName>
+              <dictionaryName>hbase-log4j</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+
+        <component>
+          <name>PHOENIX_QUERY_SERVER</name>
+          <displayName>Phoenix Query Server</displayName>
+          <category>SLAVE</category>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/phoenix_queryserver.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hbase</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>ZOOKEEPER</service>
+        <service>HDFS</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>hbase-policy</config-type>
+        <config-type>hbase-site</config-type>
+        <config-type>hbase-env</config-type>
+        <config-type>hbase-log4j</config-type>
+        <config-type>ranger-hbase-plugin-properties</config-type>
+      </configuration-dependencies>
+
+    </service>
+  </services>
+</metainfo>
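
This metainfo.xml is the piece that registers PHOENIX_QUERY_SERVER as an optional (0+) SLAVE component of the HBASE service. A quick standard-library Python check of which components and command scripts a service definition declares, assuming the file above has been saved locally as metainfo.xml:

    import xml.etree.ElementTree as ET

    tree = ET.parse("metainfo.xml")
    for comp in tree.getroot().iter("component"):
        print("%-22s %-7s %-3s %s" % (
            comp.findtext("name"),
            comp.findtext("category"),
            comp.findtext("cardinality"),
            comp.findtext("commandScript/script")))

    # Expected output includes a line like:
    # PHOENIX_QUERY_SERVER   SLAVE   0+  scripts/phoenix_queryserver.py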


[2/8] ambari git commit: AMBARI-9993. Add support for management of Phoenix Query Server to HDP Stack (Nick Dimiduk via alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/test/python/stacks/2.3/configs/hbase_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/configs/hbase_default.json b/ambari-server/src/test/python/stacks/2.3/configs/hbase_default.json
new file mode 100644
index 0000000..ebf126b
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.3/configs/hbase_default.json
@@ -0,0 +1,417 @@
+{
+    "configuration_attributes": {
+        "core-site": {
+            "final": {
+                "fs.defaultFS": "true"
+            }
+        }, 
+        "hbase-policy": {}, 
+        "hbase-log4j": {}, 
+        "ranger-hdfs-plugin-properties": {}, 
+        "hbase-env": {}, 
+        "hdfs-site": {
+            "final": {
+                "dfs.support.append": "true", 
+                "dfs.namenode.http-address": "true"
+            }
+        }, 
+        "zoo.cfg": {}, 
+        "hadoop-env": {}, 
+        "hadoop-policy": {}, 
+        "hdfs-log4j": {}, 
+        "hbase-site": {}, 
+        "ranger-hbase-plugin-properties": {}, 
+        "zookeeper-env": {}, 
+        "zookeeper-log4j": {}, 
+        "cluster-env": {}
+    }, 
+    "commandParams": {
+        "service_package_folder": "common-services/HBASE/0.96.0.2.0/package", 
+        "script": "scripts/hbase_regionserver.py", 
+        "hooks_folder": "HDP/2.0.6/hooks", 
+        "version": "2.3.0.0-1606", 
+        "excluded_hosts": "host1",
+        "command_timeout": "900", 
+        "script_type": "PYTHON"
+    }, 
+    "roleCommand": "CUSTOM_COMMAND", 
+    "kerberosCommandParams": [], 
+    "clusterName": "c1", 
+    "hostname": "c6405.ambari.apache.org", 
+    "hostLevelParams": {
+        "jdk_location": "http://c6405.ambari.apache.org:8080/resources/", 
+        "ambari_db_rca_password": "mapred", 
+        "java_home": "/usr/jdk64/jdk1.8.0_40", 
+        "ambari_db_rca_url": "jdbc:postgresql://c6405.ambari.apache.org/ambarirca", 
+        "jce_name": "jce_policy-8.zip", 
+        "custom_command": "RESTART", 
+        "oracle_jdbc_url": "http://c6405.ambari.apache.org:8080/resources//ojdbc6.jar", 
+        "repo_info": "[{\"baseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.3.0.0-1606\",\"osType\":\"redhat6\",\"repoId\":\"HDP-2.3\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.3.0.0\",\"latestBaseUrl\":\"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.3.0.0-1606\",\"baseSaved\":true},{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"osType\":\"redhat6\",\"repoId\":\"HDP-UTILS-1.1.0.20\",\"repoName\":\"HDP-UTILS\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"latestBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6\",\"baseSaved\":true}]", 
+        "group_list": "[\"hadoop\",\"users\"]", 
+        "agentCacheDir": "/var/lib/ambari-agent/cache", 
+        "stack_version": "2.3", 
+        "stack_name": "HDP", 
+        "db_name": "ambari", 
+        "jdk_name": "jdk-8u40-linux-x64.tar.gz", 
+        "ambari_db_rca_driver": "org.postgresql.Driver", 
+        "java_version": "8", 
+        "ambari_db_rca_username": "mapred", 
+        "db_driver_filename": "mysql-connector-java.jar", 
+        "user_list": "[\"zookeeper\",\"ambari-qa\",\"hdfs\",\"hbase\"]", 
+        "mysql_jdbc_url": "http://c6405.ambari.apache.org:8080/resources//mysql-connector-java.jar"
+    }, 
+    "commandType": "EXECUTION_COMMAND", 
+    "roleParams": {
+        "component_category": "SLAVE"
+    }, 
+    "serviceName": "HBASE", 
+    "role": "HBASE_REGIONSERVER", 
+    "forceRefreshConfigTags": [], 
+    "taskId": 54, 
+    "public_hostname": "c6405.ambari.apache.org", 
+    "configurations": {
+        "core-site": {
+            "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", 
+            "proxyuser_group": "users", 
+            "fs.trash.interval": "360", 
+            "ha.failover-controller.active-standby-elector.zk.op.retries": "120", 
+            "hadoop.http.authentication.simple.anonymous.allowed": "true", 
+            "hadoop.security.authentication": "simple", 
+            "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec", 
+            "ipc.client.connection.maxidletime": "30000", 
+            "mapreduce.jobtracker.webinterface.trusted": "false", 
+            "hadoop.security.authorization": "false", 
+            "net.topology.script.file.name": "/etc/hadoop/conf/topology_script.py", 
+            "ipc.server.tcpnodelay": "true", 
+            "ipc.client.connect.max.retries": "50", 
+            "hadoop.security.auth_to_local": "\n        DEFAULT", 
+            "io.file.buffer.size": "131072", 
+            "hadoop.proxyuser.hdfs.hosts": "*", 
+            "hadoop.proxyuser.hdfs.groups": "*", 
+            "ipc.client.idlethreshold": "8000", 
+            "fs.defaultFS": "hdfs://c6405.ambari.apache.org:8020"
+        }, 
+        "hbase-policy": {
+            "security.masterregion.protocol.acl": "*", 
+            "security.admin.protocol.acl": "*", 
+            "security.client.protocol.acl": "*"
+        }, 
+        "hbase-log4j": {
+            "content": "log4jproperties\nline2"
+        }, 
+        "ranger-hdfs-plugin-properties": {
+            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 
+            "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", 
+            "common.name.for.certificate": "-", 
+            "XAAUDIT.HDFS.IS_ENABLED": "false", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
+            "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
+            "XAAUDIT.DB.IS_ENABLED": "true", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", 
+            "hadoop.rpc.protection": "-", 
+            "ranger-hdfs-plugin-enabled": "No", 
+            "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
+            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
+            "policy_user": "ambari-qa", 
+            "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", 
+            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", 
+            "SSL_TRUSTSTORE_PASSWORD": "changeit", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", 
+            "REPOSITORY_CONFIG_USERNAME": "hadoop", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", 
+            "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", 
+            "REPOSITORY_CONFIG_PASSWORD": "hadoop"
+        }, 
+        "hbase-env": {
+            "hbase_pid_dir": "/var/run/hbase", 
+            "hbase_regionserver_xmn_max": "512", 
+            "hbase_regionserver_xmn_ratio": "0.2", 
+            "hbase_user": "hbase", 
+            "hbase_master_heapsize": "1024m", 
+            "content": "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\n# export HBASE_HEAPSIZE=1000\n\n# Extra Java runtime options.\n# Below are what we set by default. May only work with SUN JVM.\n# For more on why as well as other possible settings,\n# see http://wiki.apache.org/hadoop/PerformanceTuning\nexport SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable 
 JMX exporting\n# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\n# If you want to configure BucketCache, specify '-XX: MaxDirectMemorySize=' with proper direct memory size\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# Extra ssh options. Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by default.\nexport HBASE_LOG_DIR={{l
 og_dir}}\n\n# A string representing this instance of hbase. $USER by default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The directory where pid files are stored. /tmp by default.\nexport HBASE_PID_DIR={{pid_dir}}\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}}\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}}\"\nexport HBASE_REGIONSERV
 ER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} {% if hbase_max_direct_memory_size %} -XX:MaxDirectMemorySize={{hbase_max_direct_memory_size}}m {% endif %} -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n{% else %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}}\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} {% if hbase_max_direct_memory_size %} -XX:MaxDirectMemorySize={{hbase_max_direct_memory_size}}m {% endif %}\"\n{% endif %}", 
+            "hbase_regionserver_heapsize": "1024m", 
+            "hbase_log_dir": "/var/log/hbase", 
+            "hbase_max_direct_memory_size": ""
+        }, 
+        "hdfs-site": {
+            "dfs.namenode.checkpoint.period": "21600", 
+            "dfs.namenode.avoid.write.stale.datanode": "true", 
+            "nfs.dump.dir": "/tmp/.hdfs-nfs", 
+            "dfs.namenode.startup.delay.block.deletion.sec": "3600", 
+            "dfs.namenode.checkpoint.txns": "1000000", 
+            "dfs.block.access.token.enable": "true", 
+            "dfs.support.append": "true", 
+            "dfs.datanode.address": "0.0.0.0:50010", 
+            "dfs.cluster.administrators": " hdfs", 
+            "dfs.datanode.balance.bandwidthPerSec": "6250000", 
+            "dfs.namenode.safemode.threshold-pct": "1.0f", 
+            "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", 
+            "dfs.namenode.rpc-address": "c6405.ambari.apache.org:8020", 
+            "dfs.permissions.enabled": "true", 
+            "dfs.client.read.shortcircuit": "true", 
+            "dfs.journalnode.https-address": "0.0.0.0:8481", 
+            "dfs.namenode.https-address": "c6405.ambari.apache.org:50470", 
+            "dfs.blockreport.initialDelay": "120", 
+            "dfs.journalnode.edits.dir": "/hadoop/hdfs/journalnode", 
+            "dfs.blocksize": "134217728", 
+            "dfs.datanode.max.transfer.threads": "16384", 
+            "dfs.heartbeat.interval": "3", 
+            "dfs.replication": "3", 
+            "dfs.namenode.handler.count": "25", 
+            "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary", 
+            "fs.permissions.umask-mode": "022", 
+            "dfs.namenode.stale.datanode.interval": "30000", 
+            "dfs.datanode.ipc.address": "0.0.0.0:8010", 
+            "dfs.datanode.data.dir": "/hadoop/hdfs/data", 
+            "dfs.namenode.http-address": "c6405.ambari.apache.org:50070", 
+            "dfs.webhdfs.enabled": "true", 
+            "dfs.datanode.failed.volumes.tolerated": "0", 
+            "dfs.namenode.accesstime.precision": "0", 
+            "dfs.datanode.https.address": "0.0.0.0:50475", 
+            "dfs.namenode.write.stale.datanode.ratio": "1.0f", 
+            "dfs.namenode.secondary.http-address": "c6405.ambari.apache.org:50090", 
+            "nfs.exports.allowed.hosts": "* rw", 
+            "dfs.datanode.http.address": "0.0.0.0:50075", 
+            "dfs.datanode.du.reserved": "1073741824", 
+            "dfs.client.read.shortcircuit.streams.cache.size": "4096", 
+            "dfs.http.policy": "HTTP_ONLY", 
+            "dfs.permissions.superusergroup": "hdfs", 
+            "dfs.https.port": "50470", 
+            "dfs.journalnode.http-address": "0.0.0.0:8480", 
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
+            "dfs.namenode.avoid.read.stale.datanode": "true", 
+            "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", 
+            "dfs.datanode.data.dir.perm": "750", 
+            "dfs.namenode.name.dir.restore": "true", 
+            "dfs.replication.max": "50", 
+            "dfs.namenode.name.dir": "/hadoop/hdfs/namenode"
+        }, 
+        "zoo.cfg": {
+            "clientPort": "2181", 
+            "autopurge.purgeInterval": "24", 
+            "syncLimit": "5", 
+            "dataDir": "/hadoop/zookeeper", 
+            "initLimit": "10", 
+            "tickTime": "2000", 
+            "autopurge.snapRetainCount": "30"
+        }, 
+        "hadoop-env": {
+            "proxyuser_group": "users", 
+            "hdfs_log_dir_prefix": "/var/log/hadoop", 
+            "hdfs_user": "hdfs", 
+            "namenode_opt_maxnewsize": "256m", 
+            "namenode_opt_maxpermsize": "256m", 
+            "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende
 d to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSiz
 e={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:M
 axPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DR
 FAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environme
 nt.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of
  hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-cl
 ient/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"", 
+            "namenode_heapsize": "1024m", 
+            "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist", 
+            "namenode_opt_newsize": "256m", 
+            "nfsgateway_heapsize": "1024", 
+            "dtnode_heapsize": "1024m", 
+            "hadoop_root_logger": "INFO,RFA", 
+            "hadoop_heapsize": "1024", 
+            "hadoop_pid_dir_prefix": "/var/run/hadoop", 
+            "namenode_opt_permsize": "128m"
+        }, 
+        "hadoop-policy": {
+            "security.job.client.protocol.acl": "*", 
+            "security.job.task.protocol.acl": "*", 
+            "security.datanode.protocol.acl": "*", 
+            "security.namenode.protocol.acl": "*", 
+            "security.client.datanode.protocol.acl": "*", 
+            "security.inter.tracker.protocol.acl": "*", 
+            "security.refresh.usertogroups.mappings.protocol.acl": "hadoop", 
+            "security.client.protocol.acl": "*", 
+            "security.refresh.policy.protocol.acl": "hadoop", 
+            "security.admin.operations.protocol.acl": "hadoop", 
+            "security.inter.datanode.protocol.acl": "*"
+        }, 
+        "hdfs-log4j": {
+            "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.lo
 gger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender
 .console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logg
 er}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=
 false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# mapred audit logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.fi
 le}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\n
 log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateChange=WARN"
+        }, 
+        "hbase-site": {
+            "hbase.regionserver.wal.codec": "org.apache.hadoop.hbase.regionserver.wal.WALCellCodec", 
+            "hbase.master.info.bindAddress": "0.0.0.0", 
+            "hbase.regionserver.port": "16020", 
+            "hbase.client.keyvalue.maxsize": "1048576", 
+            "hbase.hstore.compactionThreshold": "3", 
+            "hbase.hregion.majorcompaction.jitter": "0.50", 
+            "hbase.security.authentication": "simple", 
+            "hbase.rootdir": "hdfs://c6405.ambari.apache.org:8020/apps/hbase/data", 
+            "hbase.rpc.timeout": "60000", 
+            "hbase.regionserver.handler.count": "30", 
+            "hbase.regionserver.global.memstore.lowerLimit": "0.38", 
+            "hbase.rpc.protection": "authentication", 
+            "hbase.bucketcache.size": "", 
+            "hbase.bucketcache.percentage.in.combinedcache": "", 
+            "hbase.hregion.memstore.flush.size": "134217728", 
+            "hbase.superuser": "hbase", 
+            "hbase.coprocessor.region.classes": "", 
+            "hbase.zookeeper.property.clientPort": "2181", 
+            "hbase.regionserver.global.memstore.upperLimit": "0.4", 
+            "hbase.bucketcache.ioengine": "", 
+            "zookeeper.session.timeout": "90000", 
+            "hbase.regionserver.global.memstore.size": "${hbase.regionserver.global.memstore.upperLimit}", 
+            "hbase.tmp.dir": "/hadoop/hbase", 
+            "hfile.block.cache.size": "0.40", 
+            "hbase.hregion.max.filesize": "1073741824", 
+            "hbase.client.scanner.caching": "100", 
+            "hbase.client.retries.number": "35", 
+            "hbase.defaults.for.version.skip": "true", 
+            "hbase.master.info.port": "60010", 
+            "hbase.hregion.majorcompaction": "604800000", 
+            "hbase.zookeeper.quorum": "c6405.ambari.apache.org", 
+            "hbase.regionserver.info.port": "16030", 
+            "zookeeper.znode.parent": "/hbase-unsecure", 
+            "hbase.coprocessor.master.classes": "", 
+            "hbase.hstore.blockingStoreFiles": "10", 
+            "hbase.master.port": "16000", 
+            "hbase.security.authorization": "false", 
+            "phoenix.query.timeoutMs": "60000", 
+            "hbase.local.dir": "${hbase.tmp.dir}/local", 
+            "hbase.cluster.distributed": "true", 
+            "hbase.hregion.memstore.mslab.enabled": "true", 
+            "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", 
+            "hbase.zookeeper.useMulti": "true", 
+            "hbase.hregion.memstore.block.multiplier": "4"
+        }, 
+        "ranger-hbase-plugin-properties": {
+            "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 
+            "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", 
+            "common.name.for.certificate": "-", 
+            "XAAUDIT.HDFS.IS_ENABLED": "false", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
+            "ranger-hbase-plugin-enabled": "No", 
+            "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
+            "XAAUDIT.DB.IS_ENABLED": "true", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", 
+            "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
+            "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
+            "policy_user": "ambari-qa", 
+            "UPDATE_XAPOLICIES_ON_GRANT_REVOKE": "true", 
+            "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", 
+            "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", 
+            "SSL_TRUSTSTORE_PASSWORD": "changeit", 
+            "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", 
+            "REPOSITORY_CONFIG_USERNAME": "hbase", 
+            "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", 
+            "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", 
+            "REPOSITORY_CONFIG_PASSWORD": "hbase"
+        }, 
+        "zookeeper-env": {
+            "zk_log_dir": "/var/log/zookeeper", 
+            "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}", 
+            "zk_pid_dir": "/var/run/zookeeper", 
+            "zk_user": "zookeeper"
+        }, 
+        "zookeeper-log4j": {
+            "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4
 j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLIN
 GFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n"
+        }, 
+        "cluster-env": {
+            "security_enabled": "false", 
+            "hive_tar_source": "/usr/hdp/current/hive-client/hive.tar.gz", 
+            "hadoop-streaming_tar_destination_folder": "hdfs:///hdp/apps/{{ hdp_stack_version }}/mapreduce/", 
+            "pig_tar_source": "/usr/hdp/current/pig-client/pig.tar.gz", 
+            "hive_tar_destination_folder": "hdfs:///hdp/apps/{{ hdp_stack_version }}/hive/", 
+            "ignore_groupsusers_create": "false", 
+            "smokeuser_keytab": "/etc/security/keytabs/smokeuser.headless.keytab", 
+            "hadoop-streaming_tar_source": "/usr/hdp/current/hadoop-mapreduce-client/hadoop-streaming.jar", 
+            "kerberos_domain": "EXAMPLE.COM", 
+            "tez_tar_destination_folder": "hdfs:///hdp/apps/{{ hdp_stack_version }}/tez/", 
+            "mapreduce_tar_destination_folder": "hdfs:///hdp/apps/{{ hdp_stack_version }}/mapreduce/", 
+            "tez_tar_source": "/usr/hdp/current/tez-client/lib/tez.tar.gz", 
+            "pig_tar_destination_folder": "hdfs:///hdp/apps/{{ hdp_stack_version }}/pig/", 
+            "user_group": "hadoop", 
+            "mapreduce_tar_source": "/usr/hdp/current/hadoop-client/mapreduce.tar.gz", 
+            "sqoop_tar_destination_folder": "hdfs:///hdp/apps/{{ hdp_stack_version }}/sqoop/", 
+            "smokeuser": "ambari-qa", 
+            "sqoop_tar_source": "/usr/hdp/current/sqoop-client/sqoop.tar.gz"
+        }
+    }, 
+    "configurationTags": {
+        "hbase-policy": {
+            "tag": "version1"
+        }, 
+        "hbase-log4j": {
+            "tag": "version1"
+        }, 
+        "ranger-hdfs-plugin-properties": {
+            "tag": "version1"
+        }, 
+        "zoo.cfg": {
+            "tag": "version1"
+        }, 
+        "ranger-hbase-plugin-properties": {
+            "tag": "version1"
+        }, 
+        "hbase-env": {
+            "tag": "version1"
+        }, 
+        "core-site": {
+            "tag": "version1"
+        }, 
+        "hbase-site": {
+            "tag": "version1"
+        }, 
+        "hdfs-site": {
+            "tag": "version1"
+        }, 
+        "hadoop-policy": {
+            "tag": "version1"
+        }, 
+        "hdfs-log4j": {
+            "tag": "version1"
+        }, 
+        "hadoop-env": {
+            "tag": "version1"
+        }, 
+        "zookeeper-env": {
+            "tag": "version1"
+        }, 
+        "zookeeper-log4j": {
+            "tag": "version1"
+        }, 
+        "cluster-env": {
+            "tag": "version1"
+        }
+    }, 
+    "commandId": "5-0", 
+    "clusterHostInfo": {
+        "snamenode_host": [
+            "c6405.ambari.apache.org"
+        ], 
+        "all_ping_ports": [
+            "8670"
+        ], 
+        "all_hosts": [
+            "c6405.ambari.apache.org"
+        ], 
+        "hbase_rs_hosts": [
+            "c6405.ambari.apache.org"
+        ], 
+        "slave_hosts": [
+            "c6405.ambari.apache.org"
+        ], 
+        "namenode_host": [
+            "c6405.ambari.apache.org"
+        ], 
+        "hbase_master_hosts": [
+            "c6405.ambari.apache.org"
+        ], 
+        "all_racks": [
+            "/default-rack"
+        ], 
+        "all_ipv4_ips": [
+            "192.168.64.105"
+        ], 
+        "ambari_server_host": [
+            "c6405.ambari.apache.org"
+        ], 
+        "zookeeper_hosts": [
+            "c6405.ambari.apache.org"
+        ]
+    }
+}


[6/8] ambari git commit: AMBARI-9993. Add support for management of Phoenix Query Server to HDP Stack (Nick Dimiduk via alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/files/draining_servers.rb
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/files/draining_servers.rb b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/files/draining_servers.rb
new file mode 100644
index 0000000..5bcb5b6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/files/draining_servers.rb
@@ -0,0 +1,164 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Add, remove, or list servers in draining mode via ZooKeeper
+
+require 'optparse'
+include Java
+
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.client.HBaseAdmin
+import org.apache.hadoop.hbase.zookeeper.ZKUtil
+import org.apache.commons.logging.Log
+import org.apache.commons.logging.LogFactory
+
+# Name of this script
+NAME = "draining_servers"
+
+# Do command-line parsing
+options = {}
+optparse = OptionParser.new do |opts|
+  opts.banner = "Usage: ./hbase org.jruby.Main #{NAME}.rb [options] add|remove|list <hostname>|<host:port>|<servername> ..."
+  opts.separator 'Add, remove, or list servers in draining mode. Accepts either a hostname (to drain all region servers ' +
+                 'on that host), a host:port pair, or a host,port,startCode triplet. More than one server can be given, separated by spaces.'
+  opts.on('-h', '--help', 'Display usage information') do
+    puts opts
+    exit
+  end
+  options[:debug] = false
+  opts.on('-d', '--debug', 'Display extra debug logging') do
+    options[:debug] = true
+  end
+end
+optparse.parse!
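+
+# Example invocation (the hostname below is only a placeholder):
+#   ./hbase org.jruby.Main draining_servers.rb add rs1.example.com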
+
+# Return array of servernames where servername is hostname+port+startcode
+# comma-delimited
+def getServers(admin)
+  serverInfos = admin.getClusterStatus().getServerInfo()
+  servers = []
+  for server in serverInfos
+    servers << server.getServerName()
+  end
+  return servers
+end
+
+def getServerNames(hostOrServers, config)
+  ret = []
+  
+  for hostOrServer in hostOrServers
+    # check whether it is already serverName. No need to connect to cluster
+    parts = hostOrServer.split(',')
+    if parts.size() == 3
+      ret << hostOrServer
+    else 
+      admin = HBaseAdmin.new(config) if not admin
+      servers = getServers(admin)
+
+      hostOrServer = hostOrServer.gsub(/:/, ",")
+      for server in servers 
+        ret << server if server.start_with?(hostOrServer)
+      end
+    end
+  end
+  
+  admin.close() if admin
+  return ret
+end
+
+def addServers(options, hostOrServers)
+  config = HBaseConfiguration.create()
+  servers = getServerNames(hostOrServers, config)
+  
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
+  parentZnode = zkw.drainingZNode
+  
+  begin
+    for server in servers
+      node = ZKUtil.joinZNode(parentZnode, server)
+      ZKUtil.createAndFailSilent(zkw, node)
+    end
+  ensure
+    zkw.close()
+  end
+end
+
+def removeServers(options, hostOrServers)
+  config = HBaseConfiguration.create()
+  servers = getServerNames(hostOrServers, config)
+  
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
+  parentZnode = zkw.drainingZNode
+  
+  begin
+    for server in servers
+      node = ZKUtil.joinZNode(parentZnode, server)
+      ZKUtil.deleteNodeFailSilent(zkw, node)
+    end
+  ensure
+    zkw.close()
+  end
+end
+
+# list servers in draining mode
+def listServers(options)
+  config = HBaseConfiguration.create()
+  
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
+  parentZnode = zkw.drainingZNode
+
+  servers = ZKUtil.listChildrenNoWatch(zkw, parentZnode)
+  servers.each {|server| puts server}
+end
+
+hostOrServers = ARGV[1..ARGV.size()]
+
+# Create a logger and quiet the noisy DEBUG-level client logging
+def configureLogging(options)
+  apacheLogger = LogFactory.getLog(NAME)
+  # Configure log4j to not spew so much
+  unless (options[:debug]) 
+    logger = org.apache.log4j.Logger.getLogger("org.apache.hadoop.hbase")
+    logger.setLevel(org.apache.log4j.Level::WARN)
+    logger = org.apache.log4j.Logger.getLogger("org.apache.zookeeper")
+    logger.setLevel(org.apache.log4j.Level::WARN)
+  end
+  return apacheLogger
+end
+
+# Create a logger and save it to ruby global
+$LOG = configureLogging(options)
+case ARGV[0]
+  when 'add'
+    if ARGV.length < 2
+      puts optparse
+      exit 1
+    end
+    addServers(options, hostOrServers)
+  when 'remove'
+    if ARGV.length < 2
+      puts optparse
+      exit 1
+    end
+    removeServers(options, hostOrServers)
+  when 'list'
+    listServers(options)
+  else
+    puts optparse
+    exit 3
+end

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/files/hbaseSmokeVerify.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/files/hbaseSmokeVerify.sh b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/files/hbaseSmokeVerify.sh
new file mode 100644
index 0000000..5c320c0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/files/hbaseSmokeVerify.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
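+# Expected arguments (taken from the assignments below): <hbase_conf_dir> <expected_cell_value> <hbase_command>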
+conf_dir=$1
+data=$2
+hbase_cmd=$3
+echo "scan 'ambarismoketest'" | $hbase_cmd --config $conf_dir shell > /tmp/hbase_chk_verify
+cat /tmp/hbase_chk_verify
+echo "Looking for $data"
+grep -q $data /tmp/hbase_chk_verify
+if [ "$?" -ne 0 ]
+then
+  exit 1
+fi
+
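+# The script's exit status is that of this final grep: 0 only when the scan reported '1 row(s)'.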
+grep -q '1 row(s)' /tmp/hbase_chk_verify

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/__init__.py b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/__init__.py
new file mode 100644
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/functions.py b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/functions.py
new file mode 100644
index 0000000..e6e7fb9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/functions.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import re
+import math
+import datetime
+
+from resource_management.core.shell import checked_call
+
+def calc_xmn_from_xms(heapsize_str, xmn_percent, xmn_max):
+  """
+  @param heapsize_str: str (e.g '1000m')
+  @param xmn_percent: float (e.g 0.2)
+  @param xmn_max: integer (e.g 512)
+  """
+  heapsize = int(re.search('\d+',heapsize_str).group(0))
+  heapsize_unit = re.search('\D+',heapsize_str).group(0)
+  xmn_val = int(math.floor(heapsize*xmn_percent))
+  xmn_val -= xmn_val % 8
+  
+  result_xmn_val = xmn_max if xmn_val > xmn_max else xmn_val
+  return str(result_xmn_val) + heapsize_unit
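+
+# Worked example (illustrative): calc_xmn_from_xms('1024m', 0.2, 512)
+# -> floor(1024 * 0.2) = 204, rounded down to a multiple of 8 = 200, capped at 512 -> '200m'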

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/hbase.py b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/hbase.py
new file mode 100644
index 0000000..eb62f92
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/hbase.py
@@ -0,0 +1,176 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+from resource_management import *
+import sys
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+
+
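+# Note: the two hbase() definitions below are alternative implementations; OsFamilyFuncImpl
+# registers each against an OS family and dispatches to the matching one at call time.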
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def hbase(name=None):
+  import params
+  XmlConfig("hbase-site.xml",
+            conf_dir = params.hbase_conf_dir,
+            configurations = params.config['configurations']['hbase-site'],
+            configuration_attributes=params.config['configuration_attributes']['hbase-site']
+  )
+
+# name is 'master' or 'regionserver' or 'client'
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def hbase(name=None):
+  import params
+
+  Directory( params.hbase_conf_dir_prefix,
+      mode=0755
+  )
+
+  Directory( params.hbase_conf_dir,
+      owner = params.hbase_user,
+      group = params.user_group,
+      recursive = True
+  )
+
+  Directory (params.tmp_dir,
+             owner = params.hbase_user,
+             mode=0775,
+             recursive = True,
+             cd_access="a",
+  )
+
+  Directory (params.local_dir,
+             owner = params.hbase_user,
+             group = params.user_group,
+             mode=0775,
+             recursive = True
+  )
+
+  Directory (os.path.join(params.local_dir, "jars"),
+             owner = params.hbase_user,
+             group = params.user_group,
+             mode=0775,
+             recursive = True
+  )
+
+  XmlConfig( "hbase-site.xml",
+            conf_dir = params.hbase_conf_dir,
+            configurations = params.config['configurations']['hbase-site'],
+            configuration_attributes=params.config['configuration_attributes']['hbase-site'],
+            owner = params.hbase_user,
+            group = params.user_group
+  )
+
+  XmlConfig( "core-site.xml",
+             conf_dir = params.hbase_conf_dir,
+             configurations = params.config['configurations']['core-site'],
+             configuration_attributes=params.config['configuration_attributes']['core-site'],
+             owner = params.hbase_user,
+             group = params.user_group
+  )
+
+  if 'hdfs-site' in params.config['configurations']:
+    XmlConfig( "hdfs-site.xml",
+            conf_dir = params.hbase_conf_dir,
+            configurations = params.config['configurations']['hdfs-site'],
+            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+            owner = params.hbase_user,
+            group = params.user_group
+    )
+
+    XmlConfig("hdfs-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['hdfs-site'],
+            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+            owner=params.hdfs_user,
+            group=params.user_group
+    )
+
+  if 'hbase-policy' in params.config['configurations']:
+    XmlConfig( "hbase-policy.xml",
+            conf_dir = params.hbase_conf_dir,
+            configurations = params.config['configurations']['hbase-policy'],
+            configuration_attributes=params.config['configuration_attributes']['hbase-policy'],
+            owner = params.hbase_user,
+            group = params.user_group
+    )
+  # Manually overriding ownership of file installed by hadoop package
+  else: 
+    File( format("{params.hbase_conf_dir}/hbase-policy.xml"),
+      owner = params.hbase_user,
+      group = params.user_group
+    )
+
+  File(format("{hbase_conf_dir}/hbase-env.sh"),
+       owner = params.hbase_user,
+       content=InlineTemplate(params.hbase_env_sh_template)
+  )     
+       
+  hbase_TemplateConfig( params.metric_prop_file_name,
+    tag = 'GANGLIA-MASTER' if name == 'master' else 'GANGLIA-RS'
+  )
+
+  hbase_TemplateConfig( 'regionservers')
+
+  if params.security_enabled:
+    hbase_TemplateConfig( format("hbase_{name}_jaas.conf"))
+  
+  if name != "client":
+    Directory( params.pid_dir,
+      owner = params.hbase_user,
+      recursive = True
+    )
+  
+    Directory (params.log_dir,
+      owner = params.hbase_user,
+      recursive = True
+    )
+
+  if (params.log4j_props != None):
+    File(format("{params.hbase_conf_dir}/log4j.properties"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.hbase_user,
+         content=params.log4j_props
+    )
+  elif (os.path.exists(format("{params.hbase_conf_dir}/log4j.properties"))):
+    File(format("{params.hbase_conf_dir}/log4j.properties"),
+      mode=0644,
+      group=params.user_group,
+      owner=params.hbase_user
+    )
+  if name in ["master","regionserver"]:
+    params.HdfsDirectory(params.hbase_hdfs_root_dir,
+                         action="create_delayed",
+                         owner=params.hbase_user
+    )
+    params.HdfsDirectory(params.hbase_staging_dir,
+                         action="create_delayed",
+                         owner=params.hbase_user,
+                         mode=0711
+    )
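+    # The create_delayed actions above appear to only queue the directory requests;
+    # the HdfsDirectory(None, action="create") call below then applies them in one batch.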
+    params.HdfsDirectory(None, action="create")
+
+def hbase_TemplateConfig(name, tag=None):
+  import params
+
+  TemplateConfig( format("{hbase_conf_dir}/{name}"),
+      owner = params.hbase_user,
+      template_tag = tag
+  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/hbase_client.py b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/hbase_client.py
new file mode 100644
index 0000000..3955b66
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/hbase_client.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from hbase import hbase
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+
+class HbaseClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hbase(name='client')
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HbaseClientWindows(HbaseClient):
+  pass
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HbaseClientDefault(HbaseClient):
+  def get_stack_to_component(self):
+    return {"HDP": "hbase-client"}
+
+  def pre_rolling_restart(self, env):
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+      Execute(format("hdp-select set hbase-client {version}"))
+
+      # set all of the hadoop clients since hbase client is upgraded as part
+      # of the final "CLIENTS" group and we need to ensure that hadoop-client
+      # is also set
+      Execute(format("hdp-select set hadoop-client {version}"))
+
+
+if __name__ == "__main__":
+  HbaseClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/hbase_decommission.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/hbase_decommission.py b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/hbase_decommission.py
new file mode 100644
index 0000000..54d8c0e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/hbase_decommission.py
@@ -0,0 +1,93 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def hbase_decommission(env):
+  import params
+
+  env.set_params(params)
+  File(params.region_drainer, content=StaticFile("draining_servers.rb"), owner=params.hbase_user, mode="f")
+
+  hosts = params.hbase_excluded_hosts.split(",")
+  for host in hosts:
+    if host:
+      if params.hbase_drain_only == True:
+        regiondrainer_cmd = format("cmd /c {hbase_executable} org.jruby.Main {region_drainer} remove {host}")
+        Execute(regiondrainer_cmd, user=params.hbase_user, logoutput=True)
+      else:
+        regiondrainer_cmd = format("cmd /c {hbase_executable} org.jruby.Main {region_drainer} add {host}")
+        regionmover_cmd = format("cmd /c {hbase_executable} org.jruby.Main {region_mover} unload {host}")
+        Execute(regiondrainer_cmd, user=params.hbase_user, logoutput=True)
+        Execute(regionmover_cmd, user=params.hbase_user, logoutput=True)
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def hbase_decommission(env):
+  import params
+
+  env.set_params(params)
+  kinit_cmd = params.kinit_cmd
+
+  File(params.region_drainer,
+       content=StaticFile("draining_servers.rb"),
+       mode=0755
+  )
+  
+  if params.hbase_excluded_hosts and params.hbase_excluded_hosts.split(","):
+    hosts = params.hbase_excluded_hosts.split(",")
+  elif params.hbase_included_hosts and params.hbase_included_hosts.split(","):
+    hosts = params.hbase_included_hosts.split(",")
+
+  if params.hbase_drain_only:
+    for host in hosts:
+      if host:
+        regiondrainer_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_drainer} remove {host}")
+        Execute(regiondrainer_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
+        pass
+    pass
+
+  else:
+    for host in hosts:
+      if host:
+        regiondrainer_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_drainer} add {host}")
+        regionmover_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_mover} unload {host}")
+
+        Execute(regiondrainer_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
+
+        Execute(regionmover_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
+      pass
+    pass
+  pass

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/hbase_master.py b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/hbase_master.py
new file mode 100644
index 0000000..30198c9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/hbase_master.py
@@ -0,0 +1,148 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+from hbase import hbase
+from hbase_service import hbase_service
+from hbase_decommission import hbase_decommission
+import upgrade
+from setup_ranger_hbase import setup_ranger_hbase
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+
+class HbaseMaster(Script):
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hbase(name='master')
+
+  def install(self, env):
+    self.install_packages(env)
+
+  def decommission(self, env):
+    import params
+    env.set_params(params)
+    hbase_decommission(env)
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HbaseMasterWindows(HbaseMaster):
+  def start(self, env):
+    import status_params
+    self.configure(env)
+    Service(status_params.hbase_master_win_service_name, action="start")
+
+  def stop(self, env):
+    import status_params
+    env.set_params(status_params)
+    Service(status_params.hbase_master_win_service_name, action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_windows_service_status(status_params.hbase_master_win_service_name)
+
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HbaseMasterDefault(HbaseMaster):
+  def get_stack_to_component(self):
+    return {"HDP": "hbase-master"}
+
+  def pre_rolling_restart(self, env):
+    import params
+    env.set_params(params)
+    upgrade.prestart(env, "hbase-master")
+
+  def start(self, env, rolling_restart=False):
+    import params
+    env.set_params(params)
+    self.configure(env) # for security
+    setup_ranger_hbase()  
+    hbase_service('master', action = 'start')
+    
+  def stop(self, env, rolling_restart=False):
+    import params
+    env.set_params(params)
+    hbase_service('master', action = 'stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/hbase-{hbase_user}-master.pid")
+    check_process_status(pid_file)
+
+  def security_status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      props_value_check = {"hbase.security.authentication" : "kerberos",
+                           "hbase.security.authorization": "true"}
+      props_empty_check = ['hbase.master.keytab.file',
+                           'hbase.master.kerberos.principal']
+      props_read_check = ['hbase.master.keytab.file']
+      hbase_site_expectations = build_expectations('hbase-site', props_value_check, props_empty_check,
+                                                  props_read_check)
+
+      hbase_expectations = {}
+      hbase_expectations.update(hbase_site_expectations)
+
+      security_params = get_params_from_filesystem(status_params.hbase_conf_dir,
+                                                   {'hbase-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, hbase_expectations)
+      if not result_issues:  # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ( 'hbase-site' not in security_params
+               or 'hbase.master.keytab.file' not in security_params['hbase-site']
+               or 'hbase.master.kerberos.principal' not in security_params['hbase-site']):
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hbase_user,
+                                security_params['hbase-site']['hbase.master.keytab.file'],
+                                security_params['hbase-site']['hbase.master.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+
+if __name__ == "__main__":
+  HbaseMaster().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/hbase_regionserver.py b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/hbase_regionserver.py
new file mode 100644
index 0000000..882b982
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/hbase_regionserver.py
@@ -0,0 +1,156 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+from hbase import hbase
+from hbase_service import hbase_service
+import upgrade
+from setup_ranger_hbase import setup_ranger_hbase
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+
+class HbaseRegionServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hbase(name='regionserver')
+
+  def decommission(self, env):
+    print "Decommission not yet implemented!"
+
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HbaseRegionServerWindows(HbaseRegionServer):
+  def start(self, env):
+    import status_params
+    self.configure(env)
+    Service(status_params.hbase_regionserver_win_service_name, action="start")
+
+  def stop(self, env):
+    import status_params
+    env.set_params(status_params)
+    Service(status_params.hbase_regionserver_win_service_name, action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_windows_service_status(status_params.hbase_regionserver_win_service_name)
+
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HbaseRegionServerDefault(HbaseRegionServer):
+  def get_stack_to_component(self):
+    return {"HDP": "hbase-regionserver"}
+
+  def pre_rolling_restart(self, env):
+    import params
+    env.set_params(params)
+    upgrade.prestart(env, "hbase-regionserver")
+
+  def post_rolling_restart(self, env):
+    import params
+    env.set_params(params)
+    upgrade.post_regionserver(env)
+
+  def start(self, env, rolling_restart=False):
+    import params
+    env.set_params(params)
+    self.configure(env) # for security
+    setup_ranger_hbase()  
+    hbase_service( 'regionserver',
+      action = 'start'
+    )
+
+  def stop(self, env, rolling_restart=False):
+    import params
+    env.set_params(params)
+
+    hbase_service( 'regionserver',
+      action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/hbase-{hbase_user}-regionserver.pid")
+    check_process_status(pid_file)
+
+  def security_status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      props_value_check = {"hbase.security.authentication" : "kerberos",
+                           "hbase.security.authorization": "true"}
+      props_empty_check = ['hbase.regionserver.keytab.file',
+                           'hbase.regionserver.kerberos.principal']
+      props_read_check = ['hbase.regionserver.keytab.file']
+      hbase_site_expectations = build_expectations('hbase-site', props_value_check, props_empty_check,
+                                                   props_read_check)
+
+      hbase_expectations = {}
+      hbase_expectations.update(hbase_site_expectations)
+
+      security_params = get_params_from_filesystem(status_params.hbase_conf_dir,
+                                                   {'hbase-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, hbase_expectations)
+      if not result_issues:  # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ( 'hbase-site' not in security_params
+               or 'hbase.regionserver.keytab.file' not in security_params['hbase-site']
+               or 'hbase.regionserver.kerberos.principal' not in security_params['hbase-site']):
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hbase_user,
+                                security_params['hbase-site']['hbase.regionserver.keytab.file'],
+                                security_params['hbase-site']['hbase.regionserver.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+
+if __name__ == "__main__":
+  HbaseRegionServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/hbase_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/hbase_service.py b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/hbase_service.py
new file mode 100644
index 0000000..fb565a7
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/hbase_service.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+def hbase_service(
+  name,
+  action = 'start'): # 'start' or 'stop' or 'status'
+    
+    import params
+  
+    role = name
+    cmd = format("{daemon_script} --config {hbase_conf_dir}")
+    pid_file = format("{pid_dir}/hbase-{hbase_user}-{role}.pid")
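+    # Guard used below: succeeds only when the pid file exists and the pid it holds is a live process.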
+    no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1")
+    
+    if action == 'start':
+      daemon_cmd = format("{cmd} start {role}")
+      
+      Execute ( daemon_cmd,
+        not_if = no_op_test,
+        user = params.hbase_user
+      )
+    elif action == 'stop':
+      daemon_cmd = format("{cmd} stop {role}")
+
+      Execute ( daemon_cmd,
+        user = params.hbase_user,
+        # BUGFIX: hbase regionserver sometimes hangs when the NameNode is in safe mode
+        timeout = 30,
+        on_timeout = format("! ( {no_op_test} ) || {sudo} -H -E kill -9 `cat {pid_file}`"),
+      )
+      
+      Execute (format("rm -f {pid_file}"))

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/hbase_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/hbase_upgrade.py b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/hbase_upgrade.py
new file mode 100644
index 0000000..610f527
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/hbase_upgrade.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management.libraries.script import Script
+from resource_management.core.resources.system import Execute
+
+class HbaseMasterUpgrade(Script):
+
+  def snapshot(self, env):
+    import params
+
+    snap_cmd = "echo 'snapshot_all' | {0} shell".format(params.hbase_cmd)
+
+    exec_cmd = "{0} {1}".format(params.kinit_cmd, snap_cmd)
+
+    Execute(exec_cmd, user=params.hbase_user)
+
+if __name__ == "__main__":
+  HbaseMasterUpgrade().execute()
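
The snapshot action simply pipes the snapshot_all command into the HBase shell, prefixed by a kinit when security is enabled. For illustration only, with hypothetical values standing in for the real params module, the composed command looks like this:

# Illustration only: how the composed snapshot command looks, using hypothetical
# values (keytab path and principal included) in place of the real params module.
hbase_cmd = "/usr/hdp/current/hbase-client/bin/hbase"
kinit_cmd = "kinit -kt /etc/security/keytabs/hbase.headless.keytab hbase-c1;"

snap_cmd = "echo 'snapshot_all' | {0} shell".format(hbase_cmd)
exec_cmd = "{0} {1}".format(kinit_cmd, snap_cmd)

print(exec_cmd)
# kinit -kt /etc/security/keytabs/hbase.headless.keytab hbase-c1; echo 'snapshot_all' | /usr/hdp/current/hbase-client/bin/hbase shell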

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/params.py b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/params.py
new file mode 100644
index 0000000..a10c1d4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/params.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from ambari_commons import OSCheck
+
+if OSCheck.is_windows_family():
+  from params_windows import *
+else:
+  from params_linux import *
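
params.py is a thin dispatcher: at import time it re-exports either the Windows or the Linux parameter module, so the rest of the scripts only ever import params. A generic, self-contained sketch of the same dispatch-on-import idea, using two stdlib modules as stand-ins for params_windows/params_linux:

# Generic sketch of the dispatch-on-import idea used by params.py
# (ntpath/posixpath stand in for params_windows/params_linux).
import os

if os.name == "nt":
    import ntpath as path_impl
else:
    import posixpath as path_impl

# every caller sees one name, regardless of which module was picked
print(path_impl.join("etc", "hbase", "conf"))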

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/params_linux.py
new file mode 100644
index 0000000..abde3f2
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/params_linux.py
@@ -0,0 +1,249 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from functions import calc_xmn_from_xms
+from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from resource_management.libraries.functions.default import default
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+exec_tmp_dir = Script.get_tmp_dir()
+sudo = AMBARI_SUDO_BINARY
+
+stack_name = default("/hostLevelParams/stack_name", None)
+
+version = default("/commandParams/version", None)
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+
+#hadoop params
+if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+  hadoop_bin_dir = format("/usr/hdp/current/hadoop-client/bin")
+  daemon_script = format('/usr/hdp/current/hbase-client/bin/hbase-daemon.sh')
+  region_mover = format('/usr/hdp/current/hbase-client/bin/region_mover.rb')
+  region_drainer = format('/usr/hdp/current/hbase-client/bin/draining_servers.rb')
+  hbase_cmd = format('/usr/hdp/current/hbase-client/bin/hbase')
+else:
+  hadoop_bin_dir = "/usr/bin"
+  daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
+  region_mover = "/usr/lib/hbase/bin/region_mover.rb"
+  region_drainer = "/usr/lib/hbase/bin/draining_servers.rb"
+  hbase_cmd = "/usr/lib/hbase/bin/hbase"
+
+phx_daemon_script = '/usr/hdp/current/phoenix-server/bin/queryserver.py'
+
+hadoop_conf_dir = "/etc/hadoop/conf"
+hbase_conf_dir_prefix = "/etc/hbase"
+hbase_conf_dir = format("{hbase_conf_dir_prefix}/conf")
+hbase_excluded_hosts = config['commandParams']['excluded_hosts']
+hbase_drain_only = default("/commandParams/mark_draining_only",False)
+hbase_included_hosts = config['commandParams']['included_hosts']
+
+hbase_user = status_params.hbase_user
+hbase_principal_name = config['configurations']['hbase-env']['hbase_principal_name']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+_authentication = config['configurations']['core-site']['hadoop.security.authentication']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+# this is "hadoop-metrics.properties" for 1.x stacks
+metric_prop_file_name = "hadoop-metrics2-hbase.properties"
+
+# not supporting 32 bit jdk.
+java64_home = config['hostLevelParams']['java_home']
+
+log_dir = config['configurations']['hbase-env']['hbase_log_dir']
+master_heapsize = config['configurations']['hbase-env']['hbase_master_heapsize']
+
+regionserver_heapsize = config['configurations']['hbase-env']['hbase_regionserver_heapsize']
+regionserver_xmn_max = config['configurations']['hbase-env']['hbase_regionserver_xmn_max']
+regionserver_xmn_percent = config['configurations']['hbase-env']['hbase_regionserver_xmn_ratio']
+regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, regionserver_xmn_percent, regionserver_xmn_max)
+
+if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+  hbase_max_direct_memory_size  = config['configurations']['hbase-env']['hbase_max_direct_memory_size']
+
+pid_dir = status_params.pid_dir
+tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
+# TODO UPGRADE default, update site during upgrade
+_local_dir_conf = default('/configurations/hbase-site/hbase.local.dir', "${hbase.tmp.dir}/local")
+local_dir = substitute_vars(_local_dir_conf, config['configurations']['hbase-site'])
+
+client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
+master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
+regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf")
+
+ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', []) # is not passed when ganglia is not present
+ganglia_server_host = '' if len(ganglia_server_hosts) == 0 else ganglia_server_hosts[0]
+
+ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
+has_metric_collector = not len(ams_collector_hosts) == 0
+if has_metric_collector:
+  metric_collector_host = ams_collector_hosts[0]
+  metric_collector_port = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
+  if metric_collector_port and metric_collector_port.find(':') != -1:
+    metric_collector_port = metric_collector_port.split(':')[1]
+  pass
+
+# if hbase is selected, hbase_rs_hosts should not be empty, but still default just in case
+if 'slave_hosts' in config['clusterHostInfo']:
+  rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/slave_hosts') # if hbase_rs_hosts is not given, assume region servers run on the same nodes as slaves
+else:
+  rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/all_hosts')
+
+smoke_test_user = config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal =  config['configurations']['cluster-env']['smokeuser_principal_name']
+smokeuser_permissions = "RWXCA"
+service_check_data = functions.get_unique_id_and_date()
+user_group = config['configurations']['cluster-env']["user_group"]
+
+if security_enabled:
+  _hostname_lowercase = config['hostname'].lower()
+  master_jaas_princ = config['configurations']['hbase-site']['hbase.master.kerberos.principal'].replace('_HOST',_hostname_lowercase)
+  regionserver_jaas_princ = config['configurations']['hbase-site']['hbase.regionserver.kerberos.principal'].replace('_HOST',_hostname_lowercase)
+
+master_keytab_path = config['configurations']['hbase-site']['hbase.master.keytab.file']
+regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regionserver.keytab.file']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+hbase_user_keytab = config['configurations']['hbase-env']['hbase_user_keytab']
+kinit_path_local = functions.get_kinit_path()
+if security_enabled:
+  kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_principal_name};")
+else:
+  kinit_cmd = ""
+
+#log4j.properties
+if (('hbase-log4j' in config['configurations']) and ('content' in config['configurations']['hbase-log4j'])):
+  log4j_props = config['configurations']['hbase-log4j']['content']
+else:
+  log4j_props = None
+  
+hbase_env_sh_template = config['configurations']['hbase-env']['content']
+
+hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']
+hbase_staging_dir = "/apps/hbase/staging"
+#for create_hdfs_directory
+hostname = config["hostname"]
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+kinit_path_local = functions.get_kinit_path()
+import functools
+#create partial functions with common arguments for every HdfsDirectory call
+#to create hdfs directory we need to call params.HdfsDirectory in code
+HdfsDirectory = functools.partial(
+  HdfsDirectory,
+  conf_dir=hadoop_conf_dir,
+  hdfs_user=hdfs_user,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  bin_dir = hadoop_bin_dir
+)
+
+if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+  command_role = default("/role", "")
+  if command_role == "HBASE_MASTER" or command_role == "HBASE_REGIONSERVER":
+    role_root = "master" if command_role == "HBASE_MASTER" else "regionserver"
+
+    daemon_script=format("/usr/hdp/current/hbase-{role_root}/bin/hbase-daemon.sh")
+    region_mover = format("/usr/hdp/current/hbase-{role_root}/bin/region_mover.rb")
+    region_drainer = format("/usr/hdp/current/hbase-{role_root}/bin/draining_servers.rb")
+    hbase_cmd = format("/usr/hdp/current/hbase-{role_root}/bin/hbase")
+
+if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+  # Setting Flag value for ranger hbase plugin
+  enable_ranger_hbase = False
+  ranger_plugin_enable = default("/configurations/ranger-hbase-plugin-properties/ranger-hbase-plugin-enabled","no")
+  if ranger_plugin_enable.lower() == 'yes':
+    enable_ranger_hbase = True
+  elif ranger_plugin_enable.lower() == 'no':
+    enable_ranger_hbase = False
+
+# ranger host
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+has_ranger_admin = not len(ranger_admin_hosts) == 0    
+
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+
+
+# ranger hbase properties
+policymgr_mgr_url = default("/configurations/admin-properties/policymgr_external_url", "http://localhost:6080")
+sql_connector_jar = default("/configurations/admin-properties/SQL_CONNECTOR_JAR", "/usr/share/java/mysql-connector-java.jar")
+xa_audit_db_flavor = default("/configurations/admin-properties/DB_FLAVOR", "MYSQL")
+xa_audit_db_name = default("/configurations/admin-properties/audit_db_name", "ranger_audit")
+xa_audit_db_user = default("/configurations/admin-properties/audit_db_user", "rangerlogger")
+xa_audit_db_password = default("/configurations/admin-properties/audit_db_password", "rangerlogger")
+xa_db_host = default("/configurations/admin-properties/db_host", "localhost")
+repo_name = str(config['clusterName']) + '_hbase'
+db_enabled = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.DB.IS_ENABLED", "false")
+hdfs_enabled = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.IS_ENABLED", "false")
+hdfs_dest_dir = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.DESTINATION_DIRECTORY", "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/app-type/time:yyyyMMdd")
+hdfs_buffer_dir = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY", "__REPLACE__LOG_DIR/hadoop/app-type/audit")
+hdfs_archive_dir = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY", "__REPLACE__LOG_DIR/hadoop/app-type/audit/archive")
+hdfs_dest_file = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.DESTINTATION_FILE", "hostname-audit.log")
+hdfs_dest_flush_int_sec = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS", "900")
+hdfs_dest_rollover_int_sec = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS", "86400")
+hdfs_dest_open_retry_int_sec = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS", "60")
+hdfs_buffer_file = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.LOCAL_BUFFER_FILE", "time:yyyyMMdd-HHmm.ss.log")
+hdfs_buffer_flush_int_sec = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS", "60")
+hdfs_buffer_rollover_int_sec = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS", "600")
+hdfs_archive_max_file_count = default("/configurations/ranger-hbase-plugin-properties/XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT", "10")
+ssl_keystore_file = default("/configurations/ranger-hbase-plugin-properties/SSL_KEYSTORE_FILE_PATH", "/etc/hadoop/conf/ranger-plugin-keystore.jks")
+ssl_keystore_password = default("/configurations/ranger-hbase-plugin-properties/SSL_KEYSTORE_PASSWORD", "myKeyFilePassword")
+ssl_truststore_file = default("/configurations/ranger-hbase-plugin-properties/SSL_TRUSTSTORE_FILE_PATH", "/etc/hadoop/conf/ranger-plugin-truststore.jks")
+ssl_truststore_password = default("/configurations/ranger-hbase-plugin-properties/SSL_TRUSTSTORE_PASSWORD", "changeit")
+grant_revoke = default("/configurations/ranger-hbase-plugin-properties/UPDATE_XAPOLICIES_ON_GRANT_REVOKE","true")
+common_name_for_certificate = default("/configurations/ranger-hbase-plugin-properties/common.name.for.certificate", "-")
+
+zookeeper_znode_parent = config['configurations']['hbase-site']['zookeeper.znode.parent']
+hbase_zookeeper_quorum = config['configurations']['hbase-site']['hbase.zookeeper.quorum']
+hbase_zookeeper_property_clientPort = config['configurations']['hbase-site']['hbase.zookeeper.property.clientPort']
+hbase_security_authentication = config['configurations']['hbase-site']['hbase.security.authentication']
+hadoop_security_authentication = config['configurations']['core-site']['hadoop.security.authentication']
+
+repo_config_username = default("/configurations/ranger-hbase-plugin-properties/REPOSITORY_CONFIG_USERNAME", "hbase")
+repo_config_password = default("/configurations/ranger-hbase-plugin-properties/REPOSITORY_CONFIG_PASSWORD", "hbase")
+
+admin_uname = default("/configurations/ranger-env/admin_username", "admin")
+admin_password = default("/configurations/ranger-env/admin_password", "admin")
+admin_uname_password = format("{admin_uname}:{admin_password}")
+
+ambari_ranger_admin = default("/configurations/ranger-env/ranger_admin_username", "amb_ranger_admin")
+ambari_ranger_password = default("/configurations/ranger-env/ranger_admin_password", "ambari123")
+policy_user = default("/configurations/ranger-hbase-plugin-properties/policy_user", "ambari-qa")
+
+#For curl command in ranger plugin to get db connector
+jdk_location = config['hostLevelParams']['jdk_location']
+java_share_dir = '/usr/share/java'
+if xa_audit_db_flavor and xa_audit_db_flavor.lower() == 'mysql':
+  jdbc_symlink_name = "mysql-jdbc-driver.jar"
+  jdbc_jar_name = "mysql-connector-java.jar"
+elif xa_audit_db_flavor and xa_audit_db_flavor.lower() == 'oracle':
+  jdbc_jar_name = "ojdbc6.jar"
+  jdbc_symlink_name = "oracle-jdbc-driver.jar"
+
+downloaded_custom_connector = format("{exec_tmp_dir}/{jdbc_jar_name}")
+
+driver_curl_source = format("{jdk_location}/{jdbc_symlink_name}")
+driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")
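
One pattern worth calling out in params_linux.py is the functools.partial wrapper around HdfsDirectory, which pre-binds the keyword arguments shared by every HDFS directory the scripts create. A stdlib-only sketch of the same idea (make_dir below is a hypothetical stand-in, not the Ambari HdfsDirectory resource):

# Stdlib-only sketch of the functools.partial pattern used for HdfsDirectory;
# make_dir is a hypothetical stand-in, not the Ambari resource.
import functools

def make_dir(path, owner=None, mode=None, conf_dir=None, security_enabled=False):
    print("mkdir %s owner=%s mode=%s conf=%s secure=%s"
          % (path, owner, mode, conf_dir, security_enabled))

# pre-bind the arguments every call site shares
MakeDir = functools.partial(make_dir,
                            conf_dir="/etc/hadoop/conf",
                            security_enabled=True)

# call sites only pass what differs per directory
MakeDir("/apps/hbase/data", owner="hbase", mode=0o711)
MakeDir("/apps/hbase/staging", owner="hbase", mode=0o711)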

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/params_windows.py
new file mode 100644
index 0000000..571b93c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/params_windows.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import os
+import status_params
+
+# server configurations
+config = Script.get_config()
+hbase_conf_dir = os.environ["HBASE_CONF_DIR"]
+hbase_bin_dir = os.path.join(os.environ["HBASE_HOME"],'bin')
+hbase_executable = os.path.join(hbase_bin_dir,"hbase.cmd")
+hdp_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
+hbase_user = "hadoop"
+
+#decomm params
+region_drainer = os.path.join(hbase_bin_dir,"draining_servers.rb")
+region_mover = os.path.join(hbase_bin_dir,"region_mover.rb")
+hbase_excluded_hosts = config['commandParams']['excluded_hosts']
+hbase_drain_only = config['commandParams']['mark_draining_only']
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/phoenix_queryserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/phoenix_queryserver.py b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/phoenix_queryserver.py
new file mode 100644
index 0000000..3ba5f7f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/phoenix_queryserver.py
@@ -0,0 +1,55 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management.libraries.script import Script
+from phoenix_service import phoenix_service
+
+class PhoenixQueryServer(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+
+  def get_stack_to_component(self):
+    return {"HDP": "phoenix-server"}
+
+  def configure(self, env):
+    pass
+
+  def start(self, env, rolling_restart=False):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    phoenix_service('start')
+
+  def stop(self, env, rolling_restart=False):
+    import params
+    env.set_params(params)
+    phoenix_service('stop')
+
+  def status(self, env):
+    import params
+    env.set_params(params)
+    phoenix_service('status')
+
+  def security_status(self, env):
+    self.put_structured_out({"securityState": "UNSECURED"})
+
+if __name__ == "__main__":
+  PhoenixQueryServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/phoenix_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/phoenix_service.py b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/phoenix_service.py
new file mode 100644
index 0000000..f35b9a0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/phoenix_service.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions import check_process_status, format
+
+def phoenix_service(action = 'start'): # 'start', 'stop', 'status'
+
+  import params
+
+  cmd = format("{phx_daemon_script}")
+  pid_file = format("{pid_dir}/phoenix-{hbase_user}-server.pid")
+  no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1")
+
+  if action == 'start':
+    Execute(format("{cmd} start"))
+
+  elif action == 'stop':
+    daemon_cmd = format("{cmd} stop")
+    Execute(daemon_cmd,
+      timeout = 30,
+      on_timeout = format("! ( {no_op_test} ) || {sudo} -H -E kill -9 `cat {pid_file}`"),
+    )
+    Execute(format("rm -f {pid_file}"))
+
+  elif action == 'status':
+    check_process_status(pid_file)
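
The stop branch gives the Query Server 30 seconds to exit cleanly and then falls back to kill -9 on the recorded pid before removing the pid file. A Python 3 stdlib sketch of that escalation, with hypothetical stop_cmd and pid_file values rather than the resource_management Execute resource:

# Python 3 stdlib sketch of the "graceful stop, then kill -9" escalation
# (stop_cmd and pid_file in the usage comment are hypothetical examples).
import os
import signal
import subprocess

def stop_with_escalation(stop_cmd, pid_file, timeout=30):
    try:
        subprocess.run(stop_cmd, shell=True, check=True, timeout=timeout)
    except subprocess.TimeoutExpired:
        # graceful stop did not finish in time: kill the recorded pid
        with open(pid_file) as f:
            os.kill(int(f.read().strip()), signal.SIGKILL)
    if os.path.exists(pid_file):
        os.remove(pid_file)

# stop_with_escalation("/usr/hdp/current/phoenix-server/bin/queryserver.py stop",
#                      "/var/run/hbase/phoenix-hbase-server.pid")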

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/service_check.py
new file mode 100644
index 0000000..a60ebad
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/service_check.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management import *
+from resource_management.libraries.functions.format import format
+import functions
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+
+class HbaseServiceCheck(Script):
+  pass
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HbaseServiceCheckWindows(HbaseServiceCheck):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    smoke_cmd = os.path.join(params.hdp_root, "Run-SmokeTests.cmd")
+    service = "HBASE"
+    Execute(format("cmd /C {smoke_cmd} {service}"), logoutput=True)
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HbaseServiceCheckDefault(HbaseServiceCheck):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    
+    output_file = "/apps/hbase/data/ambarismoketest"
+    test_cmd = format("fs -test -e {output_file}")
+    smokeuser_kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};") if params.security_enabled else ""
+    hbase_servicecheck_file = format("{exec_tmp_dir}/hbase-smoke.sh")
+  
+    File( format("{exec_tmp_dir}/hbaseSmokeVerify.sh"),
+      content = StaticFile("hbaseSmokeVerify.sh"),
+      mode = 0755
+    )
+  
+    File( hbase_servicecheck_file,
+      mode = 0755,
+      content = Template('hbase-smoke.sh.j2')
+    )
+    
+    if params.security_enabled:
+      hbase_grant_permissions_file = format("{exec_tmp_dir}/hbase_grant_permissions.sh")
+      grant_privileges_cmd = format("{kinit_cmd} {hbase_cmd} shell {hbase_grant_permissions_file}")
+
+      File( hbase_grant_permissions_file,
+        owner   = params.hbase_user,
+        group   = params.user_group,
+        mode    = 0644,
+        content = Template('hbase_grant_permissions.j2')
+      )
+
+      Execute( grant_privileges_cmd,
+        user = params.hbase_user,
+      )
+
+    servicecheckcmd = format("{smokeuser_kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} shell {hbase_servicecheck_file}")
+    smokeverifycmd = format("{smokeuser_kinit_cmd} {exec_tmp_dir}/hbaseSmokeVerify.sh {hbase_conf_dir} {service_check_data} {hbase_cmd}")
+  
+    Execute( servicecheckcmd,
+      tries     = 3,
+      try_sleep = 5,
+      user = params.smoke_test_user,
+      logoutput = True
+    )
+  
+    Execute ( smokeverifycmd,
+      tries     = 3,
+      try_sleep = 5,
+      user = params.smoke_test_user,
+      logoutput = True
+    )
+    
+if __name__ == "__main__":
+  HbaseServiceCheck().execute()
+  
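
Both Execute resources in the default service check retry up to three times with a five-second pause, so a freshly restarted cluster gets a short grace period before the check is reported as failed. A stdlib sketch of that tries/try_sleep behaviour (run_check is a hypothetical callable):

# Stdlib sketch of the tries/try_sleep retry behaviour of the two Execute
# resources above; run_check and the usage comment are hypothetical.
import time

def run_with_retries(run_check, tries=3, try_sleep=5):
    for attempt in range(1, tries + 1):
        try:
            return run_check()
        except Exception:
            if attempt == tries:
                raise
            time.sleep(try_sleep)

# e.g. run_with_retries(lambda: subprocess.check_call(servicecheck_cmd, shell=True))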

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/setup_ranger_hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/setup_ranger_hbase.py b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/setup_ranger_hbase.py
new file mode 100644
index 0000000..7626de8
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/setup_ranger_hbase.py
@@ -0,0 +1,202 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import sys
+import fileinput
+import subprocess
+import json
+import re
+import os
+from resource_management import *
+from resource_management.libraries.functions.ranger_functions import Rangeradmin
+from resource_management.core.logger import Logger
+
+def setup_ranger_hbase():
+  import params
+  
+  if params.has_ranger_admin:
+    File(params.downloaded_custom_connector,
+         content = DownloadSource(params.driver_curl_source)
+    )
+
+    if not os.path.isfile(params.driver_curl_target):
+      Execute(('cp', '--remove-destination', params.downloaded_custom_connector, params.driver_curl_target),
+              path=["/bin", "/usr/bin/"],
+              sudo=True)
+
+    try:
+      command = 'hdp-select status hbase-client'
+      return_code, hdp_output = shell.call(command, timeout=20)
+    except Exception, e:
+      Logger.error(str(e))
+      raise Fail('Unable to execute hdp-select command to retrieve the version.')
+
+    if return_code != 0:
+      raise Fail('Unable to determine the current version because of a non-zero return code of {0}'.format(str(return_code)))
+
+    hdp_version = re.sub('hbase-client - ', '', hdp_output).strip()
+    match = re.match('[0-9]+.[0-9]+.[0-9]+.[0-9]+-[0-9]+', hdp_version)
+
+    if match is None:
+      raise Fail('Failed to get extracted version')
+
+    file_path = '/usr/hdp/'+ hdp_version +'/ranger-hbase-plugin/install.properties'
+    if not os.path.isfile(file_path):
+      raise Fail('Ranger HBase plugin install.properties file does not exist at {0}'.format(file_path))
+    
+    ranger_hbase_dict = ranger_hbase_properties()
+    hbase_repo_data = hbase_repo_properties()
+
+    write_properties_to_file(file_path, ranger_hbase_dict)
+
+    if params.enable_ranger_hbase:
+      cmd = format('cd /usr/hdp/{hdp_version}/ranger-hbase-plugin/ && sh enable-hbase-plugin.sh')
+      ranger_adm_obj = Rangeradmin(url=ranger_hbase_dict['POLICY_MGR_URL'])
+      response_code, response_received = ranger_adm_obj.check_ranger_login_urllib2(ranger_hbase_dict['POLICY_MGR_URL'] + '/login.jsp', 'test:test')
+
+      if response_code is not None and response_code == 200:
+        ambari_ranger_admin, ambari_ranger_password = ranger_adm_obj.create_ambari_admin_user(params.ambari_ranger_admin, params.ambari_ranger_password, params.admin_uname_password)
+        ambari_username_password_for_ranger = ambari_ranger_admin + ':' + ambari_ranger_password
+        if ambari_ranger_admin != '' and ambari_ranger_password != '':
+          repo = ranger_adm_obj.get_repository_by_name_urllib2(ranger_hbase_dict['REPOSITORY_NAME'], 'hbase', 'true', ambari_username_password_for_ranger)
+          if repo and repo['name'] == ranger_hbase_dict['REPOSITORY_NAME']:
+            Logger.info('HBase repository already exists in Ranger admin')
+          else:
+            response = ranger_adm_obj.create_repository_urllib2(hbase_repo_data, ambari_username_password_for_ranger, params.policy_user)
+            if response is not None:
+              Logger.info('HBase repository created in Ranger admin')
+            else:
+              Logger.info('HBase repository creation failed in Ranger admin')
+        else:
+          Logger.info('Ambari admin username and password are blank')
+      else:
+        Logger.info('Ranger service is not started on the given host')
+    else:
+      cmd = format('cd /usr/hdp/{hdp_version}/ranger-hbase-plugin/ && sh disable-hbase-plugin.sh')
+
+    Execute(cmd, environment={'JAVA_HOME': params.java64_home}, logoutput=True)                    
+  else:
+    Logger.info('Ranger admin not installed')
+
+
+def write_properties_to_file(file_path, value):
+  for key in value:
+    modify_config(file_path, key, value[key])
+
+
+def modify_config(filepath, variable, setting):
+  var_found = False
+  already_set = False
+  V=str(variable)
+  S=str(setting)
+  # use quotes if the setting contains spaces #
+  if ' ' in S:
+    S = '"%s"' % S
+  for line in fileinput.input(filepath, inplace = 1):
+    # process lines that look like config settings #
+    if not line.lstrip(' ').startswith('#') and '=' in line:
+      _infile_var = str(line.split('=')[0].rstrip(' '))
+      _infile_set = str(line.split('=')[1].lstrip(' ').rstrip())
+      # only change the first matching occurrence #
+      if var_found == False and _infile_var.rstrip(' ') == V:
+        var_found = True
+        # don't change it if it is already set #
+        if _infile_set.lstrip(' ') == S:
+          already_set = True
+        else:
+          line = "%s=%s\n" % (V, S)
+    sys.stdout.write(line)
+
+  # Append the variable if it wasn't found #
+  if not var_found:
+    with open(filepath, "a") as f:
+        f.write("%s=%s\n" % (V, S))
+  elif already_set == True:
+    pass
+  else:
+    pass
+
+  return
+
+def ranger_hbase_properties():
+  import params
+
+  ranger_hbase_properties = dict()
+
+  ranger_hbase_properties['POLICY_MGR_URL'] = params.policymgr_mgr_url
+  ranger_hbase_properties['SQL_CONNECTOR_JAR'] = params.sql_connector_jar
+  ranger_hbase_properties['XAAUDIT.DB.FLAVOUR'] = params.xa_audit_db_flavor
+  ranger_hbase_properties['XAAUDIT.DB.DATABASE_NAME'] = params.xa_audit_db_name
+  ranger_hbase_properties['XAAUDIT.DB.USER_NAME'] = params.xa_audit_db_user
+  ranger_hbase_properties['XAAUDIT.DB.PASSWORD'] = params.xa_audit_db_password
+  ranger_hbase_properties['XAAUDIT.DB.HOSTNAME'] = params.xa_db_host
+  ranger_hbase_properties['REPOSITORY_NAME'] = params.repo_name
+  ranger_hbase_properties['XAAUDIT.DB.IS_ENABLED'] = params.db_enabled
+
+  ranger_hbase_properties['XAAUDIT.HDFS.IS_ENABLED'] = params.hdfs_enabled
+  ranger_hbase_properties['XAAUDIT.HDFS.DESTINATION_DIRECTORY'] = params.hdfs_dest_dir
+  ranger_hbase_properties['XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY'] = params.hdfs_buffer_dir
+  ranger_hbase_properties['XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY'] = params.hdfs_archive_dir
+  ranger_hbase_properties['XAAUDIT.HDFS.DESTINTATION_FILE'] = params.hdfs_dest_file
+  ranger_hbase_properties['XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS'] = params.hdfs_dest_flush_int_sec
+  ranger_hbase_properties['XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS'] = params.hdfs_dest_rollover_int_sec
+  ranger_hbase_properties['XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS'] = params.hdfs_dest_open_retry_int_sec
+  ranger_hbase_properties['XAAUDIT.HDFS.LOCAL_BUFFER_FILE'] = params.hdfs_buffer_file
+  ranger_hbase_properties['XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS'] = params.hdfs_buffer_flush_int_sec
+  ranger_hbase_properties['XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS'] = params.hdfs_buffer_rollover_int_sec
+  ranger_hbase_properties['XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT'] = params.hdfs_archive_max_file_count
+
+  ranger_hbase_properties['SSL_KEYSTORE_FILE_PATH'] = params.ssl_keystore_file
+  ranger_hbase_properties['SSL_KEYSTORE_PASSWORD'] = params.ssl_keystore_password
+  ranger_hbase_properties['SSL_TRUSTSTORE_FILE_PATH'] = params.ssl_truststore_file
+  ranger_hbase_properties['SSL_TRUSTSTORE_PASSWORD'] = params.ssl_truststore_password
+   
+  ranger_hbase_properties['UPDATE_XAPOLICIES_ON_GRANT_REVOKE'] = params.grant_revoke
+
+  return ranger_hbase_properties    
+
+def hbase_repo_properties():
+  import params
+
+  config_dict = dict()
+  config_dict['username'] = params.repo_config_username
+  config_dict['password'] = params.repo_config_password
+  config_dict['hadoop.security.authentication'] = params.hadoop_security_authentication
+  config_dict['hbase.security.authentication'] = params.hbase_security_authentication
+  config_dict['hbase.zookeeper.property.clientPort'] = params.hbase_zookeeper_property_clientPort
+  config_dict['hbase.zookeeper.quorum'] = params.hbase_zookeeper_quorum
+  config_dict['zookeeper.znode.parent'] = params.zookeeper_znode_parent
+  config_dict['commonNameForCertificate'] = params.common_name_for_certificate
+
+  if params.security_enabled:
+    config_dict['hbase.master.kerberos.principal'] = params.master_jaas_princ
+  else:
+    config_dict['hbase.master.kerberos.principal'] = ''
+
+  repo= dict()
+  repo['isActive'] = "true"
+  repo['config'] = json.dumps(config_dict)
+  repo['description'] = "hbase repo"
+  repo['name'] = params.repo_name
+  repo['repositoryType'] = "hbase"
+  repo['assetType'] = '2'
+
+  data = json.dumps(repo)
+
+  return data
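
write_properties_to_file walks the dictionary returned by ranger_hbase_properties and rewrites install.properties in place, updating keys that already exist and appending the ones that do not. A small usage sketch against a throwaway file (hypothetical values; assumes the helpers defined in the diff above are importable):

# Usage sketch for write_properties_to_file/modify_config, run against a
# throwaway file instead of install.properties (values are hypothetical).
import tempfile

props = {"POLICY_MGR_URL": "http://ranger.example.com:6080",
         "REPOSITORY_NAME": "c1_hbase"}

with tempfile.NamedTemporaryFile(mode="w", suffix=".properties", delete=False) as f:
    f.write("POLICY_MGR_URL=http://localhost:6080\n# a comment\n")
    path = f.name

write_properties_to_file(path, props)  # helper defined in the diff above

with open(path) as f:
    print(f.read())
# POLICY_MGR_URL=http://ranger.example.com:6080
# # a comment
# REPOSITORY_NAME=c1_hbase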

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/status_params.py
new file mode 100644
index 0000000..14a06d3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/status_params.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from ambari_commons import OSCheck
+
+config = Script.get_config()
+
+if OSCheck.is_windows_family():
+  hbase_master_win_service_name = "master"
+  hbase_regionserver_win_service_name = "regionserver"
+else:
+  pid_dir = config['configurations']['hbase-env']['hbase_pid_dir']
+  hbase_user = config['configurations']['hbase-env']['hbase_user']
+
+  # Security related/required params
+  hostname = config['hostname']
+  security_enabled = config['configurations']['cluster-env']['security_enabled']
+  kinit_path_local = functions.get_kinit_path()
+  tmp_dir = Script.get_tmp_dir()
+
+
+  hbase_conf_dir_prefix = "/etc/hbase"
+  hbase_conf_dir = format("{hbase_conf_dir_prefix}/conf")

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/upgrade.py b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/upgrade.py
new file mode 100644
index 0000000..6f2e258
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/scripts/upgrade.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import re
+
+from resource_management import *
+from resource_management.core.resources.system import Execute
+from resource_management.core.shell import call
+from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions.decorator import retry
+
+def prestart(env, hdp_component):
+  import params
+
+  if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    Execute("hdp-select set {0} {1}".format(hdp_component, params.version))
+
+def post_regionserver(env):
+  import params
+  env.set_params(params)
+
+  check_cmd = "echo 'status \"simple\"' | {0} shell".format(params.hbase_cmd)
+
+  exec_cmd = "{0} {1}".format(params.kinit_cmd, check_cmd)
+  call_and_match(exec_cmd, params.hbase_user, params.hostname.lower() + ":")
+
+
+@retry(times=15, sleep_time=2, err_class=Fail)
+def call_and_match(cmd, user, regex):
+
+  code, out = call(cmd, user=user)
+
+  if not (out and re.search(regex, out)):
+    raise Fail("Could not verify RS available")
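
call_and_match polls the RegionServer through the HBase shell's status 'simple' output until the local hostname shows up, retrying up to 15 times with a 2-second sleep. The resource_management retry decorator is assumed to behave roughly like this stdlib sketch:

# Stdlib sketch of a retry decorator with the same shape as
# @retry(times=..., sleep_time=..., err_class=...) used above (assumed behaviour).
import functools
import time

def retry(times=15, sleep_time=2, err_class=Exception):
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            for attempt in range(1, times + 1):
                try:
                    return fn(*args, **kwargs)
                except err_class:
                    if attempt == times:
                        raise
                    time.sleep(sleep_time)
        return wrapper
    return decorator

@retry(times=3, sleep_time=1, err_class=ValueError)
def flaky_check():
    # calling flaky_check() would be attempted 3 times, one second apart,
    # before the ValueError is finally re-raised
    raise ValueError("region server not yet registered")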

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2 b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
new file mode 100644
index 0000000..50234f9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
@@ -0,0 +1,105 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+#
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is a hardcoded name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+{% if has_metric_collector %}
+
+*.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
+*.sink.timeline.slave.host.name={{hostname}}
+hbase.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+hbase.period=10
+hbase.collector={{metric_collector_host}}:{{metric_collector_port}}
+
+jvm.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+jvm.period=10
+jvm.collector={{metric_collector_host}}:{{metric_collector_port}}
+
+rpc.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+rpc.period=10
+rpc.collector={{metric_collector_host}}:{{metric_collector_port}}
+
+hbase.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+hbase.sink.timeline.period=10
+hbase.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+
+{% else %}
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+hbase.period=10
+hbase.servers={{ganglia_server_host}}:8663
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+jvm.period=10
+jvm.servers={{ganglia_server_host}}:8663
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+rpc.period=10
+rpc.servers={{ganglia_server_host}}:8663
+
+#Ganglia following hadoop example
+hbase.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+hbase.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+hbase.sink.ganglia.servers={{ganglia_server_host}}:8663
+
+{% endif %}


http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2 b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
new file mode 100644
index 0000000..462bef4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
@@ -0,0 +1,104 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+#
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is a hardcoded name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+{% if has_metric_collector %}
+
+*.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
+*.sink.timeline.slave.host.name={{hostname}}
+hbase.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+hbase.period=10
+hbase.collector={{metric_collector_host}}:{{metric_collector_port}}
+
+jvm.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+jvm.period=10
+jvm.collector={{metric_collector_host}}:{{metric_collector_port}}
+
+rpc.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+rpc.period=10
+rpc.collector={{metric_collector_host}}:{{metric_collector_port}}
+
+hbase.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+hbase.sink.timeline.period=10
+hbase.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+
+{% else %}
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+hbase.period=10
+hbase.servers={{ganglia_server_host}}:8656
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+jvm.period=10
+jvm.servers={{ganglia_server_host}}:8656
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+rpc.period=10
+rpc.servers={{ganglia_server_host}}:8656
+
+#Ganglia following hadoop example
+hbase.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+hbase.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+hbase.sink.ganglia.servers={{ganglia_server_host}}:8656
+
+{% endif %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/hbase-smoke.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/hbase-smoke.sh.j2 b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/hbase-smoke.sh.j2
new file mode 100644
index 0000000..458da95
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/hbase-smoke.sh.j2
@@ -0,0 +1,44 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+disable 'ambarismoketest'
+drop 'ambarismoketest'
+create 'ambarismoketest','family'
+put 'ambarismoketest','row01','family:col01','{{service_check_data}}'
+scan 'ambarismoketest'
+exit
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/hbase_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/hbase_client_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/hbase_client_jaas.conf.j2
new file mode 100644
index 0000000..38f9721
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/hbase_client_jaas.conf.j2
@@ -0,0 +1,23 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=false
+useTicketCache=true;
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/hbase_grant_permissions.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/hbase_grant_permissions.j2 b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/hbase_grant_permissions.j2
new file mode 100644
index 0000000..3378983
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/hbase_grant_permissions.j2
@@ -0,0 +1,39 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+grant '{{smoke_test_user}}', '{{smokeuser_permissions}}'
+exit
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/hbase_master_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/hbase_master_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/hbase_master_jaas.conf.j2
new file mode 100644
index 0000000..a93c36c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/hbase_master_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{master_keytab_path}}"
+principal="{{master_jaas_princ}}";
+};
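
The master JAAS template above becomes a login configuration that the JVM picks up through the java.security.auth.login.config system property, typically set via HBASE_MASTER_OPTS in hbase-env. A quick jinja2 sketch of the rendered file, with hypothetical keytab and principal values (the real values come from the cluster configuration):

    from jinja2 import Template

    master_jaas_j2 = (
        "Client {\n"
        "com.sun.security.auth.module.Krb5LoginModule required\n"
        "useKeyTab=true\nstoreKey=true\nuseTicketCache=false\n"
        'keyTab="{{master_keytab_path}}"\n'
        'principal="{{master_jaas_princ}}";\n'
        "};\n"
    )
    print(Template(master_jaas_j2).render(
        master_keytab_path="/etc/security/keytabs/hbase.service.keytab",   # hypothetical values
        master_jaas_princ="hbase/c6405.ambari.apache.org@EXAMPLE.COM"))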

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/hbase_regionserver_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/hbase_regionserver_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/hbase_regionserver_jaas.conf.j2
new file mode 100644
index 0000000..7097481
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/hbase_regionserver_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{regionserver_keytab_path}}"
+principal="{{regionserver_jaas_princ}}";
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/regionservers.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/regionservers.j2 b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/regionservers.j2
new file mode 100644
index 0000000..fc6cc37
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/package/templates/regionservers.j2
@@ -0,0 +1,20 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in rs_hosts %}{{host}}
+{% endfor %}
\ No newline at end of file
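
The template above expands rs_hosts into HBase's conf/regionservers file, one hostname per line. A quick way to see the expected output, using a hypothetical host list:

    from jinja2 import Template

    regionservers_j2 = "{% for host in rs_hosts %}{{host}}\n{% endfor %}"
    rendered = Template(regionservers_j2).render(
        rs_hosts=["c6405.ambari.apache.org", "c6406.ambari.apache.org"])   # hypothetical hosts
    print(rendered, end="")
    # c6405.ambari.apache.org
    # c6406.ambari.apache.org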

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/widgets.json b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/widgets.json
new file mode 100644
index 0000000..a64a2c8
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/1.1.0.2.3/widgets.json
@@ -0,0 +1,192 @@
+{
+  "layouts": [
+  {
+    "layout_name": "default_hbase_layout",
+    "section_name": "HBASE_SUMMARY",
+    "widgetLayoutInfo": [
+      {
+        "widget_name": "RS_READS_WRITES",
+        "display_name": "RegionServer Reads and Writes",
+        "description": "This widget shows all the read requests and write requests on all regions for a RegionServer",
+        "widget_type": "GRAPH",
+        "is_visible": true,
+        "metrics": [
+          {
+            "name": "regionserver.Server.Get_num_ops",
+            "ambari_id": "metrics/hbase/regionserver/Server/Get_num_ops",
+            "category": "",
+            "service_name": "HBASE",
+            "component_name": "HBASE_REGIONSERVER",
+            "host_component_criteria": "isActive=true"
+          },
+          {
+            "name": "regionserver.Server.Scan_num_ops",
+            "ambari_id": "metrics/hbase/regionserver/Server/Scan_num_ops",
+            "category": "",
+            "service_name": "HBASE",
+            "component_name": "HBASE_REGIONSERVER"
+          },
+          {
+            "name": "regionserver.Server.Append_num_ops",
+            "ambari_id": "metrics/hbase/regionserver/Server/Append_num_ops",
+            "category": "",
+            "service_name": "HBASE",
+            "component_name": "HBASE_REGIONSERVER"
+          },
+          {
+            "name": "regionserver.Server.Delete_num_ops",
+            "ambari_id": "metrics/hbase/regionserver/Server/Delete_num_ops",
+            "category": "",
+            "service_name": "HBASE",
+            "component_name": "HBASE_REGIONSERVER"
+          },
+          {
+            "name": "regionserver.Server.Increment_num_ops",
+            "ambari_id": "metrics/hbase/regionserver/Server/Increment_num_ops",
+            "category": "",
+            "service_name": "HBASE",
+            "component_name": "HBASE_REGIONSERVER"
+          },
+          {
+            "name": "regionserver.Server.Mutate_num_ops",
+            "ambari_id": "metrics/hbase/regionserver/Server/Mutate_num_ops",
+            "category": "",
+            "service_name": "HBASE",
+            "component_name": "HBASE_REGIONSERVER"
+          }
+        ],
+        "values": [
+          {
+            "name": "Read Requests",
+            "value": "`${regionserver.Server.Get_num_ops + regionserver.Server.Scan_num_ops}`"
+          },
+          {
+            "name": "Write Requests",
+            "value": "`${metrics.hbase.regionserver.Server.Append_num_ops + metrics.hbase.regionserver.Server.Delete_num_ops + metrics.hbase.regionserver.Server.Increment_num_ops + metrics.hbase.regionserver.Server.Mutate_num_ops}`"
+          }
+        ],
+        "properties": {
+          "display_unit": "Requests",
+          "graph_type": "LINE",
+          "time_ranger": "1 week"
+        }
+      },
+      {
+        "widget_name": "OPEN_CONNECTIONS",
+        "display_name": "Open Connections",
+        "description": "This widget shows number of current open connections",
+        "widget_type": "GRAPH",
+        "is_visible": true,
+        "metrics": [
+          {
+            "name": "ipc.IPC.numOpenConnections",
+            "ambari_id": "metrics/hbase/ipc/IPC/numOpenConnections",
+            "category": "",
+            "service_name": "HBASE",
+            "component_name": "HBASE_REGIONSERVER"
+          }
+        ],
+        "values": [
+          {
+            "name": "Open Connections",
+            "value": "`${ipc.IPC.numOpenConnections}`"
+          }
+        ],
+        "properties": {
+          "display_unit": "Connections",
+          "graph_type": "STACK",
+          "time_ranger": "1 hour"
+        }
+      },
+      {
+        "widget_name": "ACTIVE_HANDLER",
+        "display_name": "Active Handlers vs Calls in General Queue",
+        "widget_type": "GRAPH",
+        "is_visible": true,
+        "metrics": [
+          {
+            "name": "ipc.IPC.numOpenConnections",
+            "ambari_id": "metrics/hbase/ipc/IPC/numOpenConnections",
+            "category": "",
+            "service_name": "HBASE",
+            "component_name": "HBASE_REGIONSERVER"
+          },
+          {
+            "name": "ipc.IPC.numCallsInGeneralQueue",
+            "ambari_id": "metrics/hbase/ipc/IPC/numOpenConnections",
+            "category": "",
+            "service_name": "HBASE",
+            "component_name": "HBASE_REGIONSERVER"
+          }
+        ],
+        "values": [
+          {
+            "name": "Active Handlers",
+            "value": "`${ipc.IPC.numActiveHandler}`"
+          },
+          {
+            "name": "Calls in General Queue",
+            "value": "`${ipc.IPC.numCallsInGeneralQueue}`"
+          }
+        ],
+        "properties": {
+          "graph_type": "LINE",
+          "time_ranger": "1 hour"
+        }
+      },
+      {
+        "widget_name": "FILES_LOCAL",
+        "display_name": "Files Local",
+        "description": "This widget shows percentage of files local.",
+        "widget_type": "NUMBER",
+        "is_visible": true,
+        "metrics": [
+          {
+            "name": "regionserver.Server.percentFilesLocal",
+            "ambari_id": "metrics/hbase/regionserver/percentFilesLocal",
+            "category": "",
+            "service_name": "HBASE",
+            "component_name": "HBASE_REGIONSERVER"
+          }
+        ],
+        "values": [
+          {
+            "name": "Files Local",
+            "value": "`${regionserver.Server.percentFilesLocal}`"
+          }
+        ],
+        "properties": {
+          "display_unit": "%"
+        }
+      },
+      {
+        "widget_name": "UPDATED_BLOCKED_TIME",
+        "display_name": "Updated Blocked Time",
+        "description": "",
+        "widget_type": "GRAPH",
+        "is_visible": true,
+        "metrics": [
+          {
+            "name": "regionserver.Server.updatesBlockedTime",
+            "ambari_id": "metrics/hbase/regionserver/Server/updatesBlockedTime",
+            "category": "",
+            "service_name": "HBASE",
+            "component_name": "HBASE_REGIONSERVER"
+          }
+        ],
+        "values": [
+          {
+            "name": "Updated Blocked Time",
+            "value": "`${regionserver.Server.updatesBlockedTime}`"
+          }
+        ],
+        "properties": {
+          "display_unit": "seconds",
+          "graph_type": "LINE",
+          "time_ranger": "1 day"
+        }
+      }
+    ]
+  }
+]
+}
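
Each "values" entry above is an expression over the declared metrics that Ambari evaluates against the collected time series; RS_READS_WRITES, for example, sums the Get and Scan op counters into a single "Read Requests" series. A toy illustration of that aggregation over aligned samples (hypothetical numbers, not a reimplementation of Ambari's expression engine):

    # Toy version of `${regionserver.Server.Get_num_ops + regionserver.Server.Scan_num_ops}`.
    get_num_ops  = [120, 135, 160]   # hypothetical per-interval samples
    scan_num_ops = [ 40,  55,  70]

    read_requests = [g + s for g, s in zip(get_num_ops, scan_num_ops)]
    print(read_requests)   # [160, 190, 230]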

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/test/python/stacks/2.3/HBASE/test_hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/HBASE/test_hbase_client.py b/ambari-server/src/test/python/stacks/2.3/HBASE/test_hbase_client.py
new file mode 100644
index 0000000..dd58ea2
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.3/HBASE/test_hbase_client.py
@@ -0,0 +1,213 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, call, patch
+from stacks.utils.RMFTestCase import *
+from unittest import skip
+
+@patch("platform.linux_distribution", new = MagicMock(return_value="Linux"))
+@patch("os.path.exists", new = MagicMock(return_value=True))
+class TestHBaseClient(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "HBASE/1.1.0.2.3/package"
+  STACK_VERSION = "2.3"
+
+  def test_configure_secured(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_client.py",
+                   classname = "HbaseClient",
+                   command = "configure",
+                   config_file="hbase_secure.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+
+    self.assertResourceCalled('Directory', '/etc/hbase',
+      mode = 0755
+    )
+    self.assertResourceCalled('Directory', '/etc/hbase/conf',
+      owner = 'hbase',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/hadoop/hbase',
+      owner = 'hbase',
+      mode=0775,
+      recursive = True,
+      cd_access='a'
+    )
+    self.assertResourceCalled('Directory', '/hadoop/hbase/local',
+      owner = 'hbase',
+      group = 'hadoop',
+      mode=0775,
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/hadoop/hbase/local/jars',
+      owner = 'hbase',
+      group = 'hadoop',
+      mode=0775,
+      recursive = True,
+    )
+    self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hbase-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hbase-site']
+    )
+    self.assertResourceCalled('XmlConfig', 'core-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['core-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['core-site']
+    )
+    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hdfs-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
+    )
+    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
+      owner = 'hdfs',
+      group = 'hadoop',
+      conf_dir = '/etc/hadoop/conf',
+      configurations = self.getConfig()['configurations']['hdfs-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
+    )
+    self.assertResourceCalled('XmlConfig', 'hbase-policy.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hbase-policy'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hbase-policy']
+    )
+    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-env.sh',
+        content = InlineTemplate(self.getConfig()['configurations']['hbase-env']['content']),
+        owner = 'hbase',
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics2-hbase.properties',
+      owner = 'hbase',
+      template_tag = 'GANGLIA-RS',
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase_client_jaas.conf',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('File',
+                              '/etc/hbase/conf/log4j.properties',
+                              mode=0644,
+                              group='hadoop',
+                              owner='hbase',
+                              content='log4jproperties\nline2'
+    )
+    self.assertNoMoreResources()
+    
+  def test_configure_default(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_client.py",
+                   classname = "HbaseClient",
+                   command = "configure",
+                   config_file="hbase_default.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+    self.assertResourceCalled('Directory', '/etc/hbase',
+      mode = 0755
+    )
+    self.assertResourceCalled('Directory', '/etc/hbase/conf',
+      owner = 'hbase',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/hadoop/hbase',
+      owner = 'hbase',
+      mode=0775,
+      recursive = True,
+      cd_access='a'
+    )
+    self.assertResourceCalled('Directory', '/hadoop/hbase/local',
+      owner = 'hbase',
+      group = 'hadoop',
+      mode=0775,
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/hadoop/hbase/local/jars',
+      owner = 'hbase',
+      group = 'hadoop',
+      mode=0775,
+      recursive = True,
+    )
+    self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hbase-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hbase-site']
+    )
+    self.assertResourceCalled('XmlConfig', 'core-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['core-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['core-site']
+    )
+    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hdfs-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
+    )
+    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
+      owner = 'hdfs',
+      group = 'hadoop',
+      conf_dir = '/etc/hadoop/conf',
+      configurations = self.getConfig()['configurations']['hdfs-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
+    )
+    self.assertResourceCalled('XmlConfig', 'hbase-policy.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hbase-policy'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hbase-policy']
+    )
+    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-env.sh',
+        content = InlineTemplate(self.getConfig()['configurations']['hbase-env']['content']),
+        owner = 'hbase',
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics2-hbase.properties',
+      owner = 'hbase',
+      template_tag = 'GANGLIA-RS',
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('File',
+                              '/etc/hbase/conf/log4j.properties',
+                              mode=0644,
+                              group='hadoop',
+                              owner='hbase',
+                              content='log4jproperties\nline2'
+    )
+    self.assertNoMoreResources()

http://git-wip-us.apache.org/repos/asf/ambari/blob/54647547/ambari-server/src/test/python/stacks/2.3/HBASE/test_hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/HBASE/test_hbase_master.py b/ambari-server/src/test/python/stacks/2.3/HBASE/test_hbase_master.py
new file mode 100644
index 0000000..f4a8d6a
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.3/HBASE/test_hbase_master.py
@@ -0,0 +1,678 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from mock.mock import MagicMock, patch
+from stacks.utils.RMFTestCase import *
+from unittest import skip
+
+@patch("platform.linux_distribution", new = MagicMock(return_value="Linux"))
+@patch("os.path.exists", new = MagicMock(return_value=True))
+class TestHBaseMaster(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "HBASE/1.1.0.2.3/package"
+  STACK_VERSION = "2.3"
+
+  def test_configure_default(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
+                   classname = "HbaseMaster",
+                   command = "configure",
+                   config_file="hbase_default.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+    
+    self.assert_configure_default()
+    self.assertNoMoreResources()
+
+  def test_start_default(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
+                   classname = "HbaseMaster",
+                   command = "start",
+                   config_file="hbase_default.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+    
+    self.assert_configure_default()
+    self.assertResourceCalled('Execute', '/usr/hdp/current/hbase-regionserver/bin/hbase-daemon.sh --config /etc/hbase/conf start master',
+      not_if = 'ls /var/run/hbase/hbase-hbase-master.pid >/dev/null 2>&1 && ps -p `cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1',
+      user = 'hbase'
+    )
+    self.assertNoMoreResources()
+    
+  def test_stop_default(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
+                   classname = "HbaseMaster",
+                   command = "stop",
+                   config_file="hbase_default.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+    
+    self.assertResourceCalled('Execute', '/usr/hdp/current/hbase-regionserver/bin/hbase-daemon.sh --config /etc/hbase/conf stop master',
+        on_timeout = '! ( ls /var/run/hbase/hbase-hbase-master.pid >/dev/null 2>&1 && ps -p `cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1 ) || ambari-sudo.sh -H -E kill -9 `cat /var/run/hbase/hbase-hbase-master.pid`',
+        timeout = 30,
+        user = 'hbase',
+    )
+    
+    self.assertResourceCalled('Execute', 'rm -f /var/run/hbase/hbase-hbase-master.pid',
+    )
+    self.assertNoMoreResources()
+
+  def test_decom_default(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
+                       classname = "HbaseMaster",
+                       command = "decommission",
+                       config_file="hbase_default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+
+    self.assertResourceCalled('File', '/usr/hdp/current/hbase-regionserver/bin/draining_servers.rb',
+                              content = StaticFile('draining_servers.rb'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('Execute', ' /usr/hdp/current/hbase-regionserver/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/hdp/current/hbase-regionserver/bin/draining_servers.rb add host1',
+                              logoutput = True,
+                              user = 'hbase',
+                              )
+    self.assertResourceCalled('Execute', ' /usr/hdp/current/hbase-regionserver/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/hdp/current/hbase-regionserver/bin/region_mover.rb unload host1',
+                              logoutput = True,
+                              user = 'hbase',
+                              )
+    self.assertNoMoreResources()
+
+  def test_decom_default_draining_only(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
+                       classname = "HbaseMaster",
+                       command = "decommission",
+                       config_file="default.hbasedecom.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+
+    self.assertResourceCalled('File', '/usr/lib/hbase/bin/draining_servers.rb',
+                              content = StaticFile('draining_servers.rb'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('Execute', ' /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/draining_servers.rb remove host1',
+                              logoutput = True,
+                              user = 'hbase',
+                              )
+    self.assertNoMoreResources()
+
+  def test_configure_secured(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
+                   classname = "HbaseMaster",
+                   command = "configure",
+                   config_file="hbase_secure.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+    
+    self.assert_configure_secured()
+    self.assertNoMoreResources()
+    
+  def test_start_secured(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
+                   classname = "HbaseMaster",
+                   command = "start",
+                   config_file="hbase_secure.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+    
+    self.assert_configure_secured()
+    self.assertResourceCalled('Execute', '/usr/hdp/current/hbase-regionserver/bin/hbase-daemon.sh --config /etc/hbase/conf start master',
+      not_if = 'ls /var/run/hbase/hbase-hbase-master.pid >/dev/null 2>&1 && ps -p `cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1',
+      user = 'hbase',
+    )
+    self.assertNoMoreResources()
+    
+  def test_stop_secured(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
+                   classname = "HbaseMaster",
+                   command = "stop",
+                   config_file="hbase_secure.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+
+    self.assertResourceCalled('Execute', '/usr/hdp/current/hbase-regionserver/bin/hbase-daemon.sh --config /etc/hbase/conf stop master',
+        on_timeout = '! ( ls /var/run/hbase/hbase-hbase-master.pid >/dev/null 2>&1 && ps -p `cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1 ) || ambari-sudo.sh -H -E kill -9 `cat /var/run/hbase/hbase-hbase-master.pid`',
+        timeout = 30,
+        user = 'hbase',
+    )
+
+    self.assertResourceCalled('Execute', 'rm -f /var/run/hbase/hbase-hbase-master.pid',
+    )
+    self.assertNoMoreResources()
+
+  def test_decom_secure(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
+                       classname = "HbaseMaster",
+                       command = "decommission",
+                       config_file="hbase_secure.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+
+    self.assertResourceCalled('File', '/usr/hdp/current/hbase-regionserver/bin/draining_servers.rb',
+                              content = StaticFile('draining_servers.rb'),
+                              mode = 0755,
+                              )
+    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hbase.headless.keytab hbase@EXAMPLE.COM; /usr/hdp/current/hbase-regionserver/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/hdp/current/hbase-regionserver/bin/draining_servers.rb add host1',
+                              logoutput = True,
+                              user = 'hbase',
+                              )
+    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hbase.headless.keytab hbase@EXAMPLE.COM; /usr/hdp/current/hbase-regionserver/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/hdp/current/hbase-regionserver/bin/region_mover.rb unload host1',
+                              logoutput = True,
+                              user = 'hbase',
+                              )
+    self.assertNoMoreResources()
+
+  def assert_configure_default(self):
+    self.assertResourceCalled('Directory', '/etc/hbase',
+      mode = 0755
+    )
+    self.assertResourceCalled('Directory', '/etc/hbase/conf',
+      owner = 'hbase',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/hadoop/hbase',
+      owner = 'hbase',
+      mode = 0775,
+      recursive = True,
+      cd_access='a'
+    )
+    self.assertResourceCalled('Directory', '/hadoop/hbase/local',
+      owner = 'hbase',
+      group = 'hadoop',
+      mode=0775,
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/hadoop/hbase/local/jars',
+      owner = 'hbase',
+      group = 'hadoop',
+      mode=0775,
+      recursive = True,
+    )
+    self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hbase-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hbase-site']
+    )
+    self.assertResourceCalled('XmlConfig', 'core-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['core-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['core-site']
+    )
+    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hdfs-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
+    )
+    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              conf_dir = '/etc/hadoop/conf',
+                              configurations = self.getConfig()['configurations']['hdfs-site'],
+                              configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
+    )
+    self.assertResourceCalled('XmlConfig', 'hbase-policy.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hbase-policy'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hbase-policy']
+    )
+    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-env.sh',
+      owner = 'hbase',
+      content = InlineTemplate(self.getConfig()['configurations']['hbase-env']['content']),
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics2-hbase.properties',
+      owner = 'hbase',
+      template_tag = 'GANGLIA-MASTER',
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('Directory', '/var/run/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/var/log/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )
+    self.assertResourceCalled('File',
+                              '/etc/hbase/conf/log4j.properties',
+                              mode=0644,
+                              group='hadoop',
+                              owner='hbase',
+                              content='log4jproperties\nline2'
+    )
+    self.assertResourceCalled('HdfsDirectory', 'hdfs://c6405.ambari.apache.org:8020/apps/hbase/data',
+                              security_enabled = False,
+                              keytab = UnknownConfigurationMock(),
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = "/usr/bin/kinit",
+                              owner = 'hbase',
+                              bin_dir = '/usr/hdp/current/hadoop-client/bin',
+                              action = ['create_delayed'],
+                              )
+    self.assertResourceCalled('HdfsDirectory', '/apps/hbase/staging',
+                              security_enabled = False,
+                              keytab = UnknownConfigurationMock(),
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = "/usr/bin/kinit",
+                              mode = 0711,
+                              owner = 'hbase',
+                              bin_dir = '/usr/hdp/current/hadoop-client/bin',
+                              action = ['create_delayed'],
+                              )
+    self.assertResourceCalled('HdfsDirectory', None,
+                              security_enabled = False,
+                              keytab = UnknownConfigurationMock(),
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = "/usr/bin/kinit",
+                              bin_dir = '/usr/hdp/current/hadoop-client/bin',
+                              action = ['create'],
+                              )
+
+  def assert_configure_secured(self):
+    self.assertResourceCalled('Directory', '/etc/hbase',
+      mode = 0755
+    )
+    self.assertResourceCalled('Directory', '/etc/hbase/conf',
+      owner = 'hbase',
+      group = 'hadoop',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/hadoop/hbase',
+      owner = 'hbase',
+      mode = 0775,
+      recursive = True,
+      cd_access='a'
+    )
+    self.assertResourceCalled('Directory', '/hadoop/hbase/local',
+      owner = 'hbase',
+      group = 'hadoop',
+      mode=0775,
+      recursive = True
+    )
+    self.assertResourceCalled('Directory', '/hadoop/hbase/local/jars',
+      owner = 'hbase',
+      group = 'hadoop',
+      mode=0775,
+      recursive = True,
+    )
+    self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hbase-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hbase-site']
+    )
+    self.assertResourceCalled('XmlConfig', 'core-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['core-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['core-site']
+    )
+    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hdfs-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
+    )
+    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
+      owner = 'hdfs',
+      group = 'hadoop',
+      conf_dir = '/etc/hadoop/conf',
+      configurations = self.getConfig()['configurations']['hdfs-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
+    )
+    self.assertResourceCalled('XmlConfig', 'hbase-policy.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hbase-policy'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hbase-policy']
+    )
+    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-env.sh',
+      owner = 'hbase',
+      content = InlineTemplate(self.getConfig()['configurations']['hbase-env']['content']),
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics2-hbase.properties',
+      owner = 'hbase',
+      template_tag = 'GANGLIA-MASTER',
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase_master_jaas.conf',
+      owner = 'hbase',
+      template_tag = None,
+    )
+    self.assertResourceCalled('Directory', '/var/run/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )
+    self.assertResourceCalled('Directory', '/var/log/hbase',
+      owner = 'hbase',
+      recursive = True,
+    )
+    self.assertResourceCalled('File',
+                              '/etc/hbase/conf/log4j.properties',
+                              mode=0644,
+                              group='hadoop',
+                              owner='hbase',
+                              content='log4jproperties\nline2'
+    )
+    self.assertResourceCalled('HdfsDirectory', 'hdfs://c6405.ambari.apache.org:8020/apps/hbase/data',
+                              security_enabled = True,
+                              keytab = '/etc/security/keytabs/hdfs.headless.keytab',
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              owner = 'hbase',
+                              bin_dir = '/usr/hdp/current/hadoop-client/bin',
+                              action = ['create_delayed'],
+                              )
+    self.assertResourceCalled('HdfsDirectory', '/apps/hbase/staging',
+                              security_enabled = True,
+                              keytab = '/etc/security/keytabs/hdfs.headless.keytab',
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              mode = 0711,
+                              owner = 'hbase',
+                              bin_dir = '/usr/hdp/current/hadoop-client/bin',
+                              action = ['create_delayed'],
+                              )
+    self.assertResourceCalled('HdfsDirectory', None,
+                              security_enabled = True,
+                              keytab = '/etc/security/keytabs/hdfs.headless.keytab',
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = '/usr/bin/kinit',
+                              bin_dir = '/usr/hdp/current/hadoop-client/bin',
+                              action = ['create'],
+                              )
+
+  @skip("enable when there's an upgrade target to test")
+  def test_start_default_22(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
+                   classname = "HbaseMaster",
+                   command = "start",
+                   config_file="hbase-2.2.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES)
+    
+    self.assertResourceCalled('Directory', '/etc/hbase',
+      mode = 0755)
+
+    self.assertResourceCalled('Directory', '/etc/hbase/conf',
+      owner = 'hbase',
+      group = 'hadoop',
+      recursive = True)
+
+    self.assertResourceCalled('Directory', '/hadoop/hbase',
+      owner = 'hbase',
+      mode = 0775,
+      recursive = True,
+      cd_access='a')
+
+    self.assertResourceCalled('Directory', '/hadoop/hbase/local',
+      owner = 'hbase',
+      group = 'hadoop',
+      mode=0775,
+      recursive = True)
+
+    self.assertResourceCalled('Directory', '/hadoop/hbase/local/jars',
+      owner = 'hbase',
+      group = 'hadoop',
+      mode=0775,
+      recursive = True)
+
+    self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hbase-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hbase-site'])
+    self.assertResourceCalled('XmlConfig', 'core-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['core-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['core-site'])
+    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hdfs-site'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site'])
+
+    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
+                              owner = 'hdfs',
+                              group = 'hadoop',
+                              conf_dir = '/etc/hadoop/conf',
+                              configurations = self.getConfig()['configurations']['hdfs-site'],
+                              configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site'])
+
+    self.assertResourceCalled('XmlConfig', 'hbase-policy.xml',
+      owner = 'hbase',
+      group = 'hadoop',
+      conf_dir = '/etc/hbase/conf',
+      configurations = self.getConfig()['configurations']['hbase-policy'],
+      configuration_attributes = self.getConfig()['configuration_attributes']['hbase-policy'])
+
+    self.assertResourceCalled('File', '/etc/hbase/conf/hbase-env.sh',
+      owner = 'hbase',
+      content = InlineTemplate(self.getConfig()['configurations']['hbase-env']['content']))
+
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics2-hbase.properties',
+      owner = 'hbase',
+      template_tag = 'GANGLIA-MASTER')
+
+    self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
+      owner = 'hbase',
+      template_tag = None)
+
+    self.assertResourceCalled('Directory', '/var/run/hbase',
+      owner = 'hbase',
+      recursive = True)
+
+    self.assertResourceCalled('Directory', '/var/log/hbase',
+      owner = 'hbase',
+      recursive = True)
+
+    self.assertResourceCalled('File',
+                              '/etc/hbase/conf/log4j.properties',
+                              mode=0644,
+                              group='hadoop',
+                              owner='hbase',
+                              content='log4jproperties\nline2')
+
+    self.assertResourceCalled('HdfsDirectory', 'hdfs://nn1/apps/hbase/data',
+                              security_enabled = False,
+                              keytab = UnknownConfigurationMock(),
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = "/usr/bin/kinit",
+                              owner = 'hbase',
+                              bin_dir = '/usr/hdp/current/hadoop-client/bin',
+                              action = ['create_delayed'])
+
+    self.assertResourceCalled('HdfsDirectory', '/apps/hbase/staging',
+                              security_enabled = False,
+                              keytab = UnknownConfigurationMock(),
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = "/usr/bin/kinit",
+                              mode = 0711,
+                              owner = 'hbase',
+                              bin_dir = '/usr/hdp/current/hadoop-client/bin',
+                              action = ['create_delayed'])
+
+    self.assertResourceCalled('HdfsDirectory', None,
+                              security_enabled = False,
+                              keytab = UnknownConfigurationMock(),
+                              conf_dir = '/etc/hadoop/conf',
+                              hdfs_user = 'hdfs',
+                              kinit_path_local = "/usr/bin/kinit",
+                              bin_dir = '/usr/hdp/current/hadoop-client/bin',
+                              action = ['create'])
+
+    self.assertResourceCalled('Execute', '/usr/hdp/current/hbase-master/bin/hbase-daemon.sh --config /etc/hbase/conf start master',
+      not_if = 'ls /var/run/hbase/hbase-hbase-master.pid >/dev/null 2>&1 && ps -p `cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1',
+      user = 'hbase')
+
+    self.assertNoMoreResources()
+
+  @patch("resource_management.libraries.functions.security_commons.build_expectations")
+  @patch("resource_management.libraries.functions.security_commons.get_params_from_filesystem")
+  @patch("resource_management.libraries.functions.security_commons.validate_security_config_properties")
+  @patch("resource_management.libraries.functions.security_commons.cached_kinit_executor")
+  @patch("resource_management.libraries.script.Script.put_structured_out")
+  def test_security_status(self, put_structured_out_mock, cached_kinit_executor_mock, validate_security_config_mock, get_params_mock, build_exp_mock):
+    # Test that the function works when it is called with correct parameters
+
+    security_params = {
+      'hbase-site': {
+        'hbase.master.kerberos.principal': 'hbase_principal',
+        'hbase.master.keytab.file': '/path/to/hbase_keytab'
+      }
+    }
+
+    result_issues = []
+    props_value_check = {"hbase.security.authentication": "kerberos",
+                           "hbase.security.authorization": "true"}
+    props_empty_check = ["hbase.master.keytab.file",
+                           "hbase.master.kerberos.principal"]
+
+    props_read_check = ["hbase.master.keytab.file"]
+
+    get_params_mock.return_value = security_params
+    validate_security_config_mock.return_value = result_issues
+
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
+                   classname = "HbaseMaster",
+                   command = "security_status",
+                   config_file="hbase_secure.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+
+    build_exp_mock.assert_called_with('hbase-site', props_value_check, props_empty_check, props_read_check)
+    put_structured_out_mock.assert_called_with({"securityState": "SECURED_KERBEROS"})
+    cached_kinit_executor_mock.called_with('/usr/bin/kinit',
+                                           self.config_dict['configurations']['hbase-env']['hbase_user'],
+                                           security_params['hbase-site']['hbase.master.keytab.file'],
+                                           security_params['hbase-site']['hbase.master.kerberos.principal'],
+                                           self.config_dict['hostname'],
+                                           '/tmp')
+
+    # Testing that the exception thrown by cached_kinit_executor is caught
+    cached_kinit_executor_mock.reset_mock()
+    cached_kinit_executor_mock.side_effect = Exception("Invalid command")
+
+    try:
+      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
+                   classname = "HbaseMaster",
+                   command = "security_status",
+                   config_file="hbase_secure.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+      )
+    except:
+      self.assertTrue(True)
+
+    # Testing with a security_params which doesn't contains hbase-site
+    empty_security_params = {}
+    cached_kinit_executor_mock.reset_mock()
+    get_params_mock.reset_mock()
+    put_structured_out_mock.reset_mock()
+    get_params_mock.return_value = empty_security_params
+
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
+                   classname = "HbaseMaster",
+                   command = "security_status",
+                   config_file="hbase_secure.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+    put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
+
+    # Testing with not empty result_issues
+    result_issues_with_params = {}
+    result_issues_with_params['hbase-site']="Something bad happened"
+
+    validate_security_config_mock.reset_mock()
+    get_params_mock.reset_mock()
+    validate_security_config_mock.return_value = result_issues_with_params
+    get_params_mock.return_value = security_params
+
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
+                   classname = "HbaseMaster",
+                   command = "security_status",
+                   config_file="hbase_default.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
+
+    # Testing with security_enable = false
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
+                   classname = "HbaseMaster",
+                   command = "security_status",
+                   config_file="hbase_secure.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
+    put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
+
+  @skip("there's no stacks/2.3/configs/hbase-preupgrade.json")
+  def test_upgrade_backup(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_upgrade.py",
+                   classname = "HbaseMasterUpgrade",
+                   command = "snapshot",
+                   config_file="hbase-preupgrade.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES)
+
+    self.assertResourceCalled('Execute', " echo 'snapshot_all' | /usr/hdp/current/hbase-client/bin/hbase shell",
+      user = 'hbase')
+  
+    self.assertNoMoreResources()